@@ -764,7 +764,7 @@ private void testInsertRowsWithGaps(boolean withSchematization, boolean useSingl
     // create tpChannel
     SnowflakeSinkService service =
         SnowflakeSinkServiceFactory.builder(conn, IngestionMethodConfig.SNOWPIPE_STREAMING, config)
-            .setRecordNumber(1)
+            .setRecordNumber(4)
             .setErrorReporter(new InMemoryKafkaRecordErrorReporter())
             .setSinkTaskContext(new InMemorySinkTaskContext(Collections.singleton(topicPartition)))
             .addTask(testTableName, topicPartition)
@@ -789,19 +789,21 @@ private void testInsertRowsWithGaps(boolean withSchematization, boolean useSingl
               i));
     }
 
-    service.insert(blankRecords);
-    TestUtils.assertWithRetry(
-        () -> service.getOffset(new TopicPartition(topic, PARTITION)) == 2, 20, 5);
+    // service.insert(blankRecords);
+    // TestUtils.assertWithRetry(
+    //     () -> service.getOffset(new TopicPartition(topic, PARTITION)) == 2, 20, 5);
 
     // Insert another two records with offset gap that requires evolution: 3, 4
-    List<SinkRecord> gapRecords = TestUtils.createNativeJsonSinkRecords(2, 3, topic, PARTITION);
+    List<SinkRecord> gapRecords = TestUtils.createNativeJsonSinkRecords(300, 3, topic, PARTITION);
     gapRecords.remove(0);
-    service.insert(gapRecords);
 
+    blankRecords.addAll(gapRecords);
+    service.insert(blankRecords);
+
     // With schematization, resending a new batch should succeed even if there is an
     // offset gap from the previously committed offset
     if (withSchematization) {
-      service.insert(gapRecords);
+      service.insert(blankRecords);
     }
 
     TestUtils.assertWithRetry(
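For orientation, the pattern the second hunk moves to is: build a later batch whose offsets sit far past anything committed, drop its head so the batch itself also starts past a gap, fold it into the earlier batch, and insert the combined list in a single call before polling the committed offset. The sketch below mirrors that flow. It is illustrative only: it assumes the fixtures and helpers visible in the hunks above (conn, config, testTableName, topicPartition, topic, PARTITION, TestUtils.createNativeJsonSinkRecords(startOffset, count, topic, partition), TestUtils.assertWithRetry(condition, maxRetries, intervalSeconds)), a trailing .build() on the factory chain, and an expected committed offset of 303; none of these details are guaranteed by this diff.

    // Illustrative sketch, not part of the patch. Assumes the test class's existing
    // fixtures (conn, config, testTableName, topicPartition, topic, PARTITION) and
    // that the factory chain ends in .build(), as elsewhere in this test file.
    SnowflakeSinkService service =
        SnowflakeSinkServiceFactory.builder(conn, IngestionMethodConfig.SNOWPIPE_STREAMING, config)
            .setRecordNumber(4) // flush once four records are buffered, matching the new hunk
            .setErrorReporter(new InMemoryKafkaRecordErrorReporter())
            .setSinkTaskContext(new InMemorySinkTaskContext(Collections.singleton(topicPartition)))
            .addTask(testTableName, topicPartition)
            .build();

    // First batch at offsets 0-1 (the real test builds its blank records in a loop;
    // the helper is used here only for brevity).
    List<SinkRecord> firstBatch = TestUtils.createNativeJsonSinkRecords(0, 2, topic, PARTITION);

    // Later batch at offsets 300-302; dropping the head leaves 301-302, so the
    // combined stream carries offsets 0, 1, 301, 302 with a gap between 1 and 301.
    List<SinkRecord> gapBatch = TestUtils.createNativeJsonSinkRecords(300, 3, topic, PARTITION);
    gapBatch.remove(0);

    // Fold both batches into one insert call instead of two separate ones.
    firstBatch.addAll(gapBatch);
    service.insert(firstBatch);

    // Poll until the channel reports the offset after the last inserted record
    // (303 here, assuming getOffset returns the last committed offset + 1).
    TestUtils.assertWithRetry(
        () -> service.getOffset(new TopicPartition(topic, PARTITION)) == 303, 20, 5);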