Support eager evaluation of all Kafka filters #212

Merged · 10 commits · Apr 26, 2023
@@ -42,6 +42,7 @@
import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaConditionFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaDeltaType;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaEvaluation;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaFilterFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaHeaderFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaHeadersFW;
@@ -962,6 +963,93 @@ public String toString()
}
}

private static final class EagerOr extends KafkaFilterCondition
Contributor:
As discussed, suggest creating a superclass Or and subclasses LazyOr and EagerOr for improved readability.

Only the .test(...) method implementation will differ between LazyOr and EagerOr, so everything else can be implemented in the Or superclass.
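
A rough sketch of that shape, assuming only test(...) differs as the comment says (the shared reset(...) and next(...) bodies are elided here; this illustrates the reviewer's suggestion and is not code from the PR):

private abstract static class Or extends KafkaFilterCondition
{
    protected final List<KafkaFilterCondition> conditions;

    protected Or(
        List<KafkaFilterCondition> conditions)
    {
        this.conditions = conditions;
    }

    // reset(...) and next(...) are identical for both strategies,
    // so their bodies (as in EagerOr below) would live here

    @Override
    public String toString()
    {
        return String.format("%s%s", getClass().getSimpleName(), conditions);
    }
}

private static final class LazyOr extends Or
{
    private LazyOr(
        List<KafkaFilterCondition> conditions)
    {
        super(conditions);
    }

    @Override
    public boolean test(
        KafkaCacheEntryFW cacheEntry)
    {
        // lazy: short-circuit on the first matching condition
        boolean accept = false;
        for (int i = 0; !accept && i < conditions.size(); i++)
        {
            accept = conditions.get(i).test(cacheEntry);
        }
        return accept;
    }
}

private static final class EagerOr extends Or
{
    private EagerOr(
        List<KafkaFilterCondition> conditions)
    {
        super(conditions);
    }

    @Override
    public boolean test(
        KafkaCacheEntryFW cacheEntry)
    {
        // eager: evaluate every condition so all matches are observed
        boolean accept = false;
        for (int i = 0; i < conditions.size(); i++)
        {
            accept |= conditions.get(i).test(cacheEntry);
        }
        return accept;
    }
}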

{
private final List<KafkaFilterCondition> conditions;

private EagerOr(
List<KafkaFilterCondition> conditions)
{
this.conditions = conditions;
}

@Override
public int reset(
KafkaCacheSegment segment,
long offset,
long latestOffset,
int position)
{
int nextPositionMin = NEXT_SEGMENT_VALUE;

if (segment != null)
{
if (position == POSITION_UNSET)
{
final KafkaCacheIndexFile indexFile = segment.indexFile();
assert indexFile != null;
final int offsetDelta = (int)(offset - segment.baseOffset());
position = cursorValue(indexFile.floor(offsetDelta));
}

nextPositionMin = NEXT_SEGMENT_VALUE;
for (int i = 0; i < conditions.size(); i++)
{
final KafkaFilterCondition condition = conditions.get(i);
final int nextPosition = condition.reset(segment, offset, latestOffset, position);
nextPositionMin = Math.min(nextPosition, nextPositionMin);
}
}

return nextPositionMin;
}

@Override
public int next(
int position)
{
int nextPositionMin = NEXT_SEGMENT_VALUE;
int nextPositionMax = RETRY_SEGMENT_VALUE;
for (int i = 0; i < conditions.size(); i++)
{
final KafkaFilterCondition condition = conditions.get(i);
final int nextPosition = condition.next(position);
if (nextPosition != RETRY_SEGMENT_VALUE)
{
nextPositionMin = Math.min(nextPosition, nextPositionMin);
}
nextPositionMax = Math.max(nextPosition, nextPositionMax);
}

if (nextPositionMax == RETRY_SEGMENT_VALUE)
{
nextPositionMin = RETRY_SEGMENT_VALUE;
}

return nextPositionMin;
}

@Override
public boolean test(
KafkaCacheEntryFW cacheEntry)
{
boolean accept = false;
for (int i = 0; i < conditions.size(); i++)
{
final KafkaFilterCondition condition = conditions.get(i);
accept |= condition.test(cacheEntry);
// TODO: Update cacheEntry.filters() here based on the conditions matched
}
return accept;
}

@Override
public String toString()
{
return String.format("%s%s", getClass().getSimpleName(), conditions);
}
}

private static DirectBuffer copyBuffer(
DirectBuffer buffer,
int index,
@@ -990,7 +1078,8 @@ private static int computeHash(
}

public KafkaFilterCondition asCondition(
ArrayFW<KafkaFilterFW> filters)
ArrayFW<KafkaFilterFW> filters,
KafkaEvaluation evaluation)
{
KafkaFilterCondition condition;
if (filters.isEmpty())
@@ -1001,7 +1090,14 @@ public KafkaFilterCondition asCondition(
{
final List<KafkaFilterCondition> asConditions = new ArrayList<>();
filters.forEach(f -> asConditions.add(asCondition(f)));
condition = asConditions.size() == 1 ? asConditions.get(0) : new KafkaFilterCondition.Or(asConditions);
if (KafkaEvaluation.EAGER == evaluation)
{
condition = asConditions.size() == 1 ? asConditions.get(0) : new KafkaFilterCondition.EagerOr(asConditions);
}
else
{
condition = asConditions.size() == 1 ? asConditions.get(0) : new KafkaFilterCondition.Or(asConditions);
}
}
return condition;
}
@@ -53,6 +53,7 @@
import io.aklivity.zilla.runtime.binding.kafka.internal.types.ArrayFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaDeltaType;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaEvaluation;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaFilterFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaHeaderFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaIsolation;
@@ -196,6 +197,7 @@ public MessageConsumer newStream(
final String16FW beginTopic = kafkaFetchBeginEx.topic();
final KafkaOffsetFW progress = kafkaFetchBeginEx.partition();
final ArrayFW<KafkaFilterFW> filters = kafkaFetchBeginEx.filters();
final KafkaEvaluation evaluation = kafkaFetchBeginEx.evaluation().get();
final KafkaIsolation isolation = kafkaFetchBeginEx.isolation().get();
final KafkaDeltaType deltaType = kafkaFetchBeginEx.deltaType().get();
final String topicName = beginTopic.asString();
@@ -236,7 +238,7 @@ public MessageConsumer newStream(
fanout = newFanout;
}

final KafkaFilterCondition condition = cursorFactory.asCondition(filters);
final KafkaFilterCondition condition = cursorFactory.asCondition(filters, evaluation);
final long latestOffset = kafkaFetchBeginEx.partition().latestOffset();
final KafkaOffsetType maximumOffset = KafkaOffsetType.valueOf((byte) latestOffset);
final Int2IntHashMap leadersByPartitionId = cacheRoute.supplyLeadersByPartitionId(topicName);
@@ -1218,6 +1220,7 @@ private void doClientReplyData(

final long partitionOffset = nextEntry.offset$();
final long timestamp = nextEntry.timestamp();
final long filters = nextEntry.filters();
final long ownerId = nextEntry.ownerId();
final int entryFlags = nextEntry.flags();
final KafkaKeyFW key = nextEntry.key();
@@ -1289,7 +1292,7 @@ private void doClientReplyData(
switch (flags & ~FLAG_SKIP)
{
case FLAG_INIT | FLAG_FIN:
doClientReplyDataFull(traceId, timestamp, ownerId, key, headers, deltaType, ancestor, fragment,
doClientReplyDataFull(traceId, timestamp, ownerId, filters, key, headers, deltaType, ancestor, fragment,
reserved, flags, partitionId, partitionOffset, stableOffset, latestOffset);
break;
case FLAG_INIT:
@@ -1322,6 +1325,7 @@ private void doClientReplyDataFull(
long traceId,
long timestamp,
long producerId,
long filters,
KafkaKeyFW key,
ArrayFW<KafkaHeaderFW> headers,
KafkaDeltaType deltaType,
@@ -1340,6 +1344,7 @@ private void doClientReplyDataFull(
.typeId(kafkaTypeId)
.fetch(f -> f.timestamp(timestamp)
.producerId(producerId)
.filters(filters)
.partition(p -> p.partitionId(partitionId)
.partitionOffset(partitionOffset)
.stableOffset(stableOffset)
@@ -50,6 +50,7 @@
import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaAckMode;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaDeltaType;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaEvaluation;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaFilterFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaHeaderFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaKeyFW;
@@ -532,7 +533,10 @@ private KafkaCacheClientProduceFan(
this.topicName = topicName;
this.members = new Long2ObjectHashMap<>();
this.defaultOffset = KafkaOffsetType.LIVE;
this.cursor = cursorFactory.newCursor(cursorFactory.asCondition(EMPTY_FILTER), KafkaDeltaType.NONE);
this.cursor = cursorFactory.newCursor(
cursorFactory
.asCondition(EMPTY_FILTER, KafkaEvaluation.LAZY),
KafkaDeltaType.NONE);

partition.newHeadIfNecessary(0L);

@@ -1158,7 +1162,10 @@ private final class KafkaCacheClientProduceStream
long leaderId,
long authorization)
{
this.cursor = cursorFactory.newCursor(cursorFactory.asCondition(EMPTY_FILTER), KafkaDeltaType.NONE);
this.cursor = cursorFactory.newCursor(
cursorFactory
.asCondition(EMPTY_FILTER, KafkaEvaluation.LAZY),
KafkaDeltaType.NONE);
this.entryMark = new MutableInteger(0);
this.position = new MutableInteger(0);
this.fan = fan;
@@ -52,6 +52,7 @@
import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaAckMode;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaDeltaType;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaEvaluation;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaFilterFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaHeaderFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaKeyFW;
@@ -1023,7 +1024,10 @@ private final class KafkaCacheServerProduceStream
KafkaCachePartition partition)
{
this.partition = partition;
this.cursor = cursorFactory.newCursor(cursorFactory.asCondition(EMPTY_FILTER), KafkaDeltaType.NONE);
this.cursor = cursorFactory.newCursor(
cursorFactory
.asCondition(EMPTY_FILTER, KafkaEvaluation.LAZY),
KafkaDeltaType.NONE);
this.fan = fan;
this.sender = sender;
this.originId = originId;
runtime/binding-kafka/src/main/zilla/internal.idl (1 addition, 0 deletions)
@@ -36,6 +36,7 @@ scope internal
kafka::KafkaHeader[] trailers;
uint32 paddingLen;
octets[paddingLen] padding;
int64 filters;
Contributor:
This change needs to be reverted, as KafkaCacheEntry is shared across multiple readers, whereas the filters mask applies to each individual cache client stream.
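
One way to read that constraint (a hypothetical sketch, not code from this PR): since the cache entry is shared and immutable, the matched-filters mask would be computed per stream at test time, for instance inside the eager condition, rather than persisted in KafkaCacheEntry. The latestFilters field below is hypothetical:

@Override
public boolean test(
    KafkaCacheEntryFW cacheEntry)
{
    // per-stream state: nothing is written back to the shared entry
    long filters = 0L;
    for (int i = 0; i < conditions.size(); i++)
    {
        if (conditions.get(i).test(cacheEntry))
        {
            filters |= 1L << i;    // bit i records that condition i matched
        }
    }
    this.latestFilters = filters;  // hypothetical per-stream field, read by the cursor
    return filters != 0L;
}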

}

struct KafkaCacheDelta
@@ -554,6 +554,21 @@ public void shouldReceiveMessagesWithHeaderOrHeaderFilter() throws Exception
k3po.finish();
}

@Test
@Configuration("cache.yaml")
@Specification({
"${app}/filter.header.or.header.eager/client",
"${app}/filter.none/server"})
@ScriptProperty("serverAddress \"zilla://streams/app1\"")
public void shouldReceiveMessagesWithHeaderOrHeaderEagerFilter() throws Exception
{
partition.append(1L);
k3po.start();
k3po.awaitBarrier("RECEIVED_MESSAGE_2");
k3po.notifyBarrier("SEND_MESSAGE_3");
k3po.finish();
}

@Test
@Configuration("cache.yaml")
@Specification({