Here are examples of the Java API org.apache.iceberg.AssertHelpers.assertThrows() taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
149 Examples
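Judging from the calls below (this is an inferred sketch, not an authoritative API reference), assertThrows takes a failure message, the expected exception class, usually a substring that must appear in the exception message, and a Runnable or Callable that is expected to throw. A minimal, self-contained sketch of that usage, assuming JUnit 4 and the org.apache.iceberg.AssertHelpers test utility on the classpath; validate() is a hypothetical method used only to make the example runnable:
import org.apache.iceberg.AssertHelpers;
import org.junit.Test;
public class AssertThrowsUsageTest {
  @Test
  public void testRejectsNonPositiveInput() {
    // failure message, expected exception type, substring expected in the
    // exception message, and the code under test that should throw
    AssertHelpers.assertThrows("Should reject non-positive input",
        IllegalArgumentException.class,
        "must be positive",
        () -> validate(0));
  }
  // hypothetical method under test, used only for illustration
  private void validate(int value) {
    if (value <= 0) {
      throw new IllegalArgumentException("Value must be positive: " + value);
    }
  }
}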
19
Source : TestUpdate.java
with Apache License 2.0
from apache
@Test
public void testUpdateWithConflictingAssignments() {
createAndInitTable("id INT, c STRUCT<n1:INT,n2:STRUCT<dn1:INT,dn2:INT>>");
AssertHelpers.assertThrows("Should complain about conflicting updates to a top-level column", AnalysisException.class, "Updates are in conflict", () -> sql("UPDATE %s t SET t.id = 1, t.c.n1 = 2, t.id = 2", tableName));
AssertHelpers.assertThrows("Should complain about conflicting updates to a nested column", AnalysisException.class, "Updates are in conflict for these columns", () -> sql("UPDATE %s t SET t.c.n1 = 1, t.id = 2, t.c.n1 = 2", tableName));
AssertHelpers.assertThrows("Should complain about conflicting updates to a nested column", AnalysisException.class, "Updates are in conflict", () -> {
sql("UPDATE %s SET c.n1 = 1, c = named_struct('n1', 1, 'n2', named_struct('dn1', 1, 'dn2', 2))", tableName);
});
}
19
Source : TestUpdate.java
with Apache License 2.0
from apache
@Test
public void testUpdateWithInvalidAssignments() {
createAndInitTable("id INT NOT NULL, s STRUCT<n1:INT NOT NULL,n2:STRUCT<dn1:INT,dn2:INT>> NOT NULL");
for (String policy : new String[] { "ansi", "strict" }) {
withSQLConf(ImmutableMap.of("spark.sql.storeAssignmentPolicy", policy), () -> {
AssertHelpers.assertThrows("Should complain about writing nulls to a top-level column", AnalysisException.class, "Cannot write nullable values to non-null column", () -> sql("UPDATE %s t SET t.id = NULL", tableName));
AssertHelpers.assertThrows("Should complain about writing nulls to a nested column", AnalysisException.class, "Cannot write nullable values to non-null column", () -> sql("UPDATE %s t SET t.s.n1 = NULL", tableName));
AssertHelpers.assertThrows("Should complain about writing missing fields in structs", AnalysisException.class, "missing fields", () -> sql("UPDATE %s t SET t.s = named_struct('n1', 1)", tableName));
AssertHelpers.assertThrows("Should complain about writing invalid data types", AnalysisException.class, "Cannot safely cast", () -> sql("UPDATE %s t SET t.s.n1 = 'str'", tableName));
AssertHelpers.assertThrows("Should complain about writing incompatible structs", AnalysisException.class, "field name does not match", () -> sql("UPDATE %s t SET t.s.n2 = named_struct('dn2', 1, 'dn1', 2)", tableName));
});
}
}
19
Source : TestUpdate.java
with Apache License 2.0
from apache
@Test
public void testUpdateWithInvalidUpdates() {
createAndInitTable("id INT, a ARRAY<STRUCT<c1:INT,c2:INT>>, m MAP<STRING,STRING>");
replacedertHelpers.replacedertThrows("Should complain about updating an array column", replacedysisException.clreplaced, "Updating nested fields is only supported for structs", () -> sql("UPDATE %s SET a.c1 = 1", tableName));
replacedertHelpers.replacedertThrows("Should complain about updating a map column", replacedysisException.clreplaced, "Updating nested fields is only supported for structs", () -> sql("UPDATE %s SET m.key = 'new_key'", tableName));
}
19
Source : TestUpdate.java
with Apache License 2.0
from apache
@Test
public void testUpdateOnNonIcebergTableNotSupported() {
createOrReplaceView("testtable", "{ \"c1\": -100, \"c2\": -200 }");
replacedertHelpers.replacedertThrows("UPDATE is not supported for non iceberg table", UnsupportedOperationException.clreplaced, "not supported temporarily", () -> sql("UPDATE %s SET c1 = -1 WHERE c2 = 1", "testtable"));
}
19
Source : TestUpdate.java
with Apache License 2.0
from apache
@Test
public void testUpdateWithNonDeterministicCondition() {
createAndInitTable("id INT, dep STRING");
replacedertHelpers.replacedertThrows("Should complain about non-deterministic expressions", replacedysisException.clreplaced, "nondeterministic expressions are only allowed", () -> sql("UPDATE %s SET id = -1 WHERE id = 1 AND rand() > 0.5", tableName));
}
19
Source : TestDelete.java
with Apache License 2.0
from apache
@Test
public void testDeleteOnNonIcebergTableNotSupported() throws NoSuchTableException {
createOrReplaceView("testtable", "{ \"c1\": -100, \"c2\": -200 }");
replacedertHelpers.replacedertThrows("Delete is not supported for non iceberg table", replacedysisException.clreplaced, "DELETE is only supported with v2 tables.", () -> sql("DELETE FROM %s WHERE c1 = -100", "testtable"));
}
19
Source : TestDelete.java
with Apache License 2.0
from apache
@Test
public void testDeleteWithNonDeterministicCondition() {
createAndInitPartitionedTable();
sql("INSERT INTO TABLE %s VALUES (1, 'hr'), (2, 'hardware')", tableName);
AssertHelpers.assertThrows("Should complain about non-deterministic expressions", AnalysisException.class, "nondeterministic expressions are only allowed", () -> sql("DELETE FROM %s WHERE id = 1 AND rand() > 0.5", tableName));
}
19
Source : TestDelete.java
with Apache License 2.0
from apache
@Test
public void testDeleteWithNotInSubqueryNotSupported() throws NoSuchTableException {
createAndInitUnpartitionedTable();
append(new Employee(1, "hr"), new Employee(2, "hardware"));
createOrReplaceView("deleted_id", Arrays.asList(-1, -2, null), Encoders.INT());
replacedertHelpers.replacedertThrows("Should complain about NOT IN subquery", replacedysisException.clreplaced, "Null-aware predicate subqueries are not currently supported", () -> sql("DELETE FROM %s WHERE id NOT IN (SELECT * FROM deleted_id)", tableName));
}
19
Source : TestCallStatementParser.java
with Apache License 2.0
from apache
@Test
public void testCallParseError() {
replacedertHelpers.replacedertThrows("Should fail with a sensible parse error", ParseException.clreplaced, "missing '(' at 'radish'", () -> parser.parsePlan("CALL cat.system radish kebab"));
}
19
Source : TestForwardCompatibility.java
with Apache License 2.0
from apache
@Test
public void testSparkWriteFailsUnknownTransform() throws IOException {
File parent = temp.newFolder("avro");
File location = new File(parent, "test");
File dataFolder = new File(location, "data");
dataFolder.mkdirs();
HadoopTables tables = new HadoopTables(CONF);
tables.create(SCHEMA, UNKNOWN_SPEC, location.toString());
List<SimpleRecord> expected = Lists.newArrayList(new SimpleRecord(1, "a"), new SimpleRecord(2, "b"), new SimpleRecord(3, "c"));
Dataset<Row> df = spark.createDataFrame(expected, SimpleRecord.class);
AssertHelpers.assertThrows("Should reject write with unsupported transform", UnsupportedOperationException.class, "Cannot write using unsupported transforms: zero", () -> df.select("id", "data").write().format("iceberg").mode("append").save(location.toString()));
}
19
Source : TestDataSourceOptions.java
with Apache License 2.0
from apache
@Test
public void testIncrementalScanOptions() throws IOException {
String tableLocation = temp.newFolder("iceberg-table").toString();
HadoopTables tables = new HadoopTables(CONF);
PartitionSpec spec = PartitionSpec.unpartitioned();
Map<String, String> options = Maps.newHashMap();
Table table = tables.create(SCHEMA, spec, options, tableLocation);
List<SimpleRecord> expectedRecords = Lists.newArrayList(new SimpleRecord(1, "a"), new SimpleRecord(2, "b"), new SimpleRecord(3, "c"), new SimpleRecord(4, "d"));
for (SimpleRecord record : expectedRecords) {
Dataset<Row> originalDf = spark.createDataFrame(Lists.newArrayList(record), SimpleRecord.class);
originalDf.select("id", "data").write().format("iceberg").mode("append").save(tableLocation);
}
List<Long> snapshotIds = SnapshotUtil.currentAncestors(table);
// start-snapshot-id and snapshot-id are both configured.
replacedertHelpers.replacedertThrows("Check both start-snapshot-id and snapshot-id are configured", IllegalArgumentException.clreplaced, "Cannot specify start-snapshot-id and end-snapshot-id to do incremental scan", () -> {
spark.read().format("iceberg").option("snapshot-id", snapshotIds.get(3).toString()).option("start-snapshot-id", snapshotIds.get(3).toString()).load(tableLocation).explain();
});
// end-snapshot-id and as-of-timestamp are both configured.
replacedertHelpers.replacedertThrows("Check both start-snapshot-id and snapshot-id are configured", IllegalArgumentException.clreplaced, "Cannot specify start-snapshot-id and end-snapshot-id to do incremental scan", () -> {
spark.read().format("iceberg").option(SparkReadOptions.AS_OF_TIMESTAMP, Long.toString(table.snapshot(snapshotIds.get(3)).timestampMillis())).option("end-snapshot-id", snapshotIds.get(2).toString()).load(tableLocation).explain();
});
// only end-snapshot-id is configured.
replacedertHelpers.replacedertThrows("Check both start-snapshot-id and snapshot-id are configured", IllegalArgumentException.clreplaced, "Cannot only specify option end-snapshot-id to do incremental scan", () -> {
spark.read().format("iceberg").option("end-snapshot-id", snapshotIds.get(2).toString()).load(tableLocation).explain();
});
// test (1st snapshot, current snapshot] incremental scan.
List<SimpleRecord> result = spark.read().format("iceberg").option("start-snapshot-id", snapshotIds.get(3).toString()).load(tableLocation).orderBy("id").as(Encoders.bean(SimpleRecord.class)).collectAsList();
Assert.assertEquals("Records should match", expectedRecords.subList(1, 4), result);
// test (2nd snapshot, 3rd snapshot] incremental scan.
List<SimpleRecord> result1 = spark.read().format("iceberg").option("start-snapshot-id", snapshotIds.get(2).toString()).option("end-snapshot-id", snapshotIds.get(1).toString()).load(tableLocation).orderBy("id").as(Encoders.bean(SimpleRecord.class)).collectAsList();
Assert.assertEquals("Records should match", expectedRecords.subList(2, 3), result1);
}
19
Source : TestParquetVectorizedReads.java
with Apache License 2.0
from apache
@Test
@Override
public void testNestedStruct() {
replacedertHelpers.replacedertThrows("Vectorized reads are not supported yet for struct fields", UnsupportedOperationException.clreplaced, "Vectorized reads are not supported yet for struct fields", () -> VectorizedSparkParquetReaders.buildReader(TypeUtil.replacedignIncreasingFreshIds(new Schema(required(1, "struct", SUPPORTED_PRIMITIVES))), new MessageType("struct", new GroupType(Type.Repereplacedion.OPTIONAL, "struct").withId(1)), false));
}
19
Source : TestRemoveOrphanFilesAction.java
with Apache License 2.0
from apache
@Test
public void testGarbageCollectionDisabled() {
Table table = TABLES.create(SCHEMA, PartitionSpec.unpartitioned(), Maps.newHashMap(), tableLocation);
List<ThreeColumnRecord> records = Lists.newArrayList(new ThreeColumnRecord(1, "AAAAAAAAAA", "AAAA"));
Dataset<Row> df = spark.createDataFrame(records, ThreeColumnRecord.class).coalesce(1);
df.select("c1", "c2", "c3").write().format("iceberg").mode("append").save(tableLocation);
table.updateProperties().set(TableProperties.GC_ENABLED, "false").commit();
Actions actions = Actions.forTable(table);
replacedertHelpers.replacedertThrows("Should complain about removing orphan files", ValidationException.clreplaced, "Cannot remove orphan files: GC is disabled", actions::removeOrphanFiles);
}
19
Source : TestExpireSnapshotsAction.java
with Apache License 2.0
from apache
@Test
public void testExpireSnapshotsWithDisabledGarbageCollection() {
table.updateProperties().set(TableProperties.GC_ENABLED, "false").commit();
table.newAppend().appendFile(FILE_A).commit();
Actions actions = Actions.forTable(table);
replacedertHelpers.replacedertThrows("Should complain about expiring snapshots", ValidationException.clreplaced, "Cannot expire snapshots: GC is disabled", actions::expireSnapshots);
}
19
Source : TestExpireSnapshotsAction.java
with Apache License 2.0
from apache
@Test
public void testRetainZeroSnapshots() {
replacedertHelpers.replacedertThrows("Should fail retain 0 snapshots " + "because number of snapshots to retain cannot be zero", IllegalArgumentException.clreplaced, "Number of snapshots to retain must be at least 1, cannot be: 0", () -> Actions.forTable(table).expireSnapshots().retainLast(0).execute());
}
19
Source : TestNessieTable.java
with Apache License 2.0
from apache
@Test
public void testFailure() throws NessieNotFoundException, NessieConflictException {
Table icebergTable = catalog.loadTable(TABLE_IDENTIFIER);
Branch branch = (Branch) client.getTreeApi().getReferenceByName(BRANCH);
IcebergTable table = client.getContentsApi().getContents(KEY, BRANCH).unwrap(IcebergTable.class).get();
client.getContentsApi().setContents(KEY, branch.getName(), branch.getHash(), "", IcebergTable.of("dummytable.metadata.json"));
replacedertHelpers.replacedertThrows("Update schema fails with conflict exception, ref not up to date", CommitFailedException.clreplaced, () -> icebergTable.updateSchema().addColumn("data", Types.LongType.get()).commit());
}
19
Source : TestIcebergInputFormats.java
with Apache License 2.0
from apache
@Test
public void testFailedResidualFiltering() throws Exception {
helper.createTable();
List<Record> expectedRecords = helper.generateRandomRecords(2, 0L);
expectedRecords.get(0).set(2, "2020-03-20");
expectedRecords.get(1).set(2, "2020-03-20");
helper.appendToTable(Row.of("2020-03-20", 0), expectedRecords);
builder.useHiveRows().filter(Expressions.and(Expressions.equal("date", "2020-03-20"), Expressions.equal("id", 0)));
replacedertHelpers.replacedertThrows("Residuals are not evaluated today for Iceberg Generics In memory model of HIVE", UnsupportedOperationException.clreplaced, "Filter expression ref(name=\"id\") == 0 is not completely satisfied.", () -> testInputFormat.create(builder.conf()));
builder.usePigTuples();
replacedertHelpers.replacedertThrows("Residuals are not evaluated today for Iceberg Generics In memory model of PIG", UnsupportedOperationException.clreplaced, "Filter expression ref(name=\"id\") == 0 is not completely satisfied.", () -> testInputFormat.create(builder.conf()));
}
19
Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache
@Test
public void testCreateTableWithNotSupportedTypes() {
TableIdentifier identifier = TableIdentifier.of("default", "not_supported_types");
// Can not create INTERVAL types from normal create table, so leave them out from this test
Map<String, Type> notSupportedTypes = ImmutableMap.of("TINYINT", Types.IntegerType.get(), "SMALLINT", Types.IntegerType.get(), "VARCHAR(1)", Types.StringType.get(), "CHAR(1)", Types.StringType.get());
for (String notSupportedType : notSupportedTypes.keySet()) {
replacedertHelpers.replacedertThrows("should throw exception", IllegalArgumentException.clreplaced, "Unsupported Hive type", () -> {
shell.executeStatement("CREATE EXTERNAL TABLE not_supported_types " + "(not_supported " + notSupportedType + ") " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier));
});
}
}
19
Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache
@Test
public void testCreateTableAboveExistingTable() throws IOException {
// Create the Iceberg table
testTables.createIcebergTable(shell.getHiveConf(), "customers", COMPLEX_SCHEMA, FileFormat.PARQUET, Collections.emptyList());
if (Catalogs.hiveCatalog(shell.getHiveConf())) {
// In HiveCatalog we just expect an exception since the table already exists
AssertHelpers.assertThrows("should throw exception", IllegalArgumentException.class, "customers already exists", () -> {
shell.executeStatement("CREATE EXTERNAL TABLE customers " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + SchemaParser.toJson(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) + "')");
});
} else {
// With other catalogs, table creation should succeed
shell.executeStatement("CREATE EXTERNAL TABLE customers " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(TableIdentifier.of("default", "customers")));
}
}
19
Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache
@Test
public void testCreateTableError() {
TableIdentifier identifier = TableIdentifier.of("default", "withShell2");
// Wrong schema
replacedertHelpers.replacedertThrows("should throw exception", IllegalArgumentException.clreplaced, "Unrecognized token 'WrongSchema'", () -> {
shell.executeStatement("CREATE EXTERNAL TABLE withShell2 " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier) + "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='WrongSchema')");
});
// Missing schema, we try to get the schema from the table and fail
replacedertHelpers.replacedertThrows("should throw exception", IllegalArgumentException.clreplaced, "Please provide ", () -> {
shell.executeStatement("CREATE EXTERNAL TABLE withShell2 " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier));
});
if (!testTables.locationForCreateTableSQL(identifier).isEmpty()) {
// Only test this if the location is required
replacedertHelpers.replacedertThrows("should throw exception", IllegalArgumentException.clreplaced, "Table location not set", () -> {
shell.executeStatement("CREATE EXTERNAL TABLE withShell2 " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + SchemaParser.toJson(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) + "')");
});
}
}
19
Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache
@Test
public void testCreatePartitionedTableWithPropertiesAndWithColumnSpecification() {
PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA).identity("last_name").build();
AssertHelpers.assertThrows("should throw exception", IllegalArgumentException.class, "Provide only one of the following", () -> {
shell.executeStatement("CREATE EXTERNAL TABLE customers (customer_id BIGINT) " + "PARTITIONED BY (first_name STRING) " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(TableIdentifier.of("default", "customers")) + " TBLPROPERTIES ('" + InputFormatConfig.PARTITION_SPEC + "'='" + PartitionSpecParser.toJson(spec) + "')");
});
}
19
Source : HiveCreateReplaceTableTest.java
with Apache License 2.0
from apache
@Test
public void testCreateTableTxnTableCreatedConcurrently() {
Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));
Transaction txn = catalog.newCreateTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
// create the table concurrently
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
Assert.assertTrue("Table should be created", catalog.tableExists(TABLE_IDENTIFIER));
AssertHelpers.assertThrows("Create table txn should fail", AlreadyExistsException.class, "Table already exists: hivedb.tbl", txn::commitTransaction);
}
19
Source : HiveCreateReplaceTableTest.java
with Apache License 2.0
from apache
@Test
public void testCreateTableTxnTableAlreadyExists() {
Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));
// create a table before starting a transaction
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
Assert.assertTrue("Table should be created", catalog.tableExists(TABLE_IDENTIFIER));
AssertHelpers.assertThrows("Should not be possible to start a new create table txn", AlreadyExistsException.class, "Table already exists: hivedb.tbl", () -> catalog.newCreateTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap()));
}
19
Source : HiveCreateReplaceTableTest.java
with Apache License 2.0
from apache
@Test
public void testReplaceTableTxnTableDeletedConcurrently() {
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
Assert.assertTrue("Table should exist", catalog.tableExists(TABLE_IDENTIFIER));
Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, false);
catalog.dropTable(TABLE_IDENTIFIER);
txn.updateProperties().set("prop", "value").commit();
replacedertHelpers.replacedertThrows("Replace table txn should fail", NoSuchTableException.clreplaced, "No such table: hivedb.tbl", txn::commitTransaction);
}
19
Source : HiveCreateReplaceTableTest.java
with Apache License 2.0
from apache
@Test
public void testReplaceTableTxnTableNotExists() {
replacedertHelpers.replacedertThrows("Should not be possible to start a new replace table txn", NoSuchTableException.clreplaced, "No such table: hivedb.tbl", () -> catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, false));
}
19
Source : TestFlinkCatalogTable.java
with Apache License 2.0
from apache
@Test
public void testRenameTable() {
replacedume.replacedumeFalse("HadoopCatalog does not support rename table", isHadoopCatalog);
final Schema tableSchema = new Schema(Types.NestedField.optional(0, "id", Types.LongType.get()));
validationCatalog.createTable(TableIdentifier.of(icebergNamespace, "tl"), tableSchema);
sql("ALTER TABLE tl RENAME TO tl2");
replacedertHelpers.replacedertThrows("Should fail if trying to get a nonexistent table", ValidationException.clreplaced, "Table `tl` was not found.", () -> getTableEnv().from("tl"));
Schema actualSchema = FlinkSchemaUtil.convert(getTableEnv().from("tl2").getSchema());
replacedert.replacedertEquals(tableSchema.replacedtruct(), actualSchema.replacedtruct());
}
19
Source : TestFlinkCatalogTable.java
with Apache License 2.0
from apache
@Test
public void testCreateTableIfNotExists() {
sql("CREATE TABLE tl(id BIGINT)");
// Assert that table does exist.
Assert.assertEquals(Maps.newHashMap(), table("tl").properties());
sql("DROP TABLE tl");
AssertHelpers.assertThrows("Table 'tl' should be dropped", NoSuchTableException.class, "Table does not exist: " + getFullQualifiedTableName("tl"), () -> table("tl"));
sql("CREATE TABLE IF NOT EXISTS tl(id BIGINT)");
Assert.assertEquals(Maps.newHashMap(), table("tl").properties());
final String uuid = UUID.randomUUID().toString();
final Map<String, String> expectedProperties = ImmutableMap.of("uuid", uuid);
table("tl").updateProperties().set("uuid", uuid).commit();
Assert.assertEquals(expectedProperties, table("tl").properties());
sql("CREATE TABLE IF NOT EXISTS tl(id BIGINT)");
replacedert.replacedertEquals("Should still be the old table.", expectedProperties, table("tl").properties());
}
19
Source : TestIcebergFilesCommitter.java
with Apache License 2.0
from apache
@Test
public void testValidateDataFileExist() throws Exception {
replacedume.replacedumeFalse("Only support equality-delete in format v2.", formatVersion < 2);
long timestamp = 0;
long checkpoint = 10;
JobID jobId = new JobID();
FileAppenderFactory<RowData> appenderFactory = createDeletableAppenderFactory();
RowData insert1 = SimpleDataUtil.createInsert(1, "aaa");
DataFile dataFile1 = writeDataFile("data-file-1", ImmutableList.of(insert1));
try (OneInputStreamOperatorTestHarness<WriteResult, Void> harness = createStreamSink(jobId)) {
harness.setup();
harness.open();
// Txn#1: insert the row <1, 'aaa'>
harness.processElement(WriteResult.builder().addDataFiles(dataFile1).build(), ++timestamp);
harness.snapshot(checkpoint, ++timestamp);
harness.notifyOfCompletedCheckpoint(checkpoint);
// Txn#2: Overwrite the committed data-file-1
RowData insert2 = SimpleDataUtil.createInsert(2, "bbb");
DataFile dataFile2 = writeDataFile("data-file-2", ImmutableList.of(insert2));
new TestTableLoader(tablePath).loadTable().newOverwrite().addFile(dataFile2).deleteFile(dataFile1).commit();
}
try (OneInputStreamOperatorTestHarness<WriteResult, Void> harness = createStreamSink(jobId)) {
harness.setup();
harness.open();
// Txn#3: position-delete the <1, 'aaa'> (NOT committed).
DeleteFile deleteFile1 = writePosDeleteFile(appenderFactory, "pos-delete-file-1", ImmutableList.of(Pair.of(dataFile1.path(), 0L)));
harness.processElement(WriteResult.builder().addDeleteFiles(deleteFile1).addReferencedDataFiles(dataFile1.path()).build(), ++timestamp);
harness.snapshot(++checkpoint, ++timestamp);
// Txn#3: validate will be failure when committing.
final long currentCheckpointId = checkpoint;
replacedertHelpers.replacedertThrows("Validation should be failure because of non-exist data files.", ValidationException.clreplaced, "Cannot commit, missing data files", () -> {
harness.notifyOfCompletedCheckpoint(currentCheckpointId);
return null;
});
}
}
19
Source : TestFlinkIcebergSink.java
with Apache License 2.0
from apache
@Test
public void testJobHashDistributionMode() {
table.updateProperties().set(TableProperties.WRITE_DISTRIBUTION_MODE, DistributionMode.HASH.modeName()).commit();
replacedertHelpers.replacedertThrows("Does not support range distribution-mode now.", IllegalArgumentException.clreplaced, "Flink does not support 'range' write distribution mode now.", () -> {
testWriteRow(null, DistributionMode.RANGE);
return null;
});
}
19
Source : TestLocalScan.java
with Apache License 2.0
from apache
@Test
public void testAsOfTimeOlderThanFirstSnapshot() {
IcebergGenerics.ScanBuilder scanBuilder = IcebergGenerics.read(sharedTable);
replacedertHelpers.replacedertThrows("Should fail on timestamp sooner than first write", IllegalArgumentException.clreplaced, "Cannot find a snapshot older than ", () -> scanBuilder.asOfTime(/* older than first snapshot */
sharedTable.history().get(0).timestampMillis() - 1));
}
19
Source : TestLocalScan.java
with Apache License 2.0
from apache
@Test
public void testUnknownSnapshotId() {
Long minSnapshotId = sharedTable.history().stream().map(h -> h.snapshotId()).min(Long::compareTo).get();
IcebergGenerics.ScanBuilder scanBuilder = IcebergGenerics.read(sharedTable);
replacedertHelpers.replacedertThrows("Should fail on unknown snapshot id", IllegalArgumentException.clreplaced, "Cannot find snapshot with ID ", () -> scanBuilder.useSnapshot(/* unknown snapshot id */
minSnapshotId - 1));
}
19
Source : TestHadoopTables.java
with Apache License 2.0
from apache
@Test
public void testDropTable() {
TABLES.create(SCHEMA, tableDir.toURI().toString());
TABLES.dropTable(tableDir.toURI().toString());
replacedertHelpers.replacedertThrows("Should complain about missing table", NoSuchTableException.clreplaced, "Table does not exist", () -> TABLES.load(tableDir.toURI().toString()));
}
19
Source : TestHadoopCatalog.java
with Apache License 2.0
from apache
@Test
public void testTableBuilderWithLocation() throws Exception {
Configuration conf = new Configuration();
String warehousePath = temp.newFolder().getAbsolutePath();
HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);
TableIdentifier tableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl");
replacedertHelpers.replacedertThrows("Should reject a custom location", IllegalArgumentException.clreplaced, "Cannot set a custom location for a path-based table", () -> catalog.buildTable(tableIdent, SCHEMA).withLocation("custom").create());
replacedertHelpers.replacedertThrows("Should reject a custom location", IllegalArgumentException.clreplaced, "Cannot set a custom location for a path-based table", () -> catalog.buildTable(tableIdent, SCHEMA).withLocation("custom").createTransaction());
replacedertHelpers.replacedertThrows("Should reject a custom location", IllegalArgumentException.clreplaced, "Cannot set a custom location for a path-based table", () -> catalog.buildTable(tableIdent, SCHEMA).withLocation("custom").createOrReplaceTransaction());
}
19
Source : TestHadoopCatalog.java
with Apache License 2.0
from apache
@Test
public void testRenameTable() throws Exception {
Configuration conf = new Configuration();
String warehousePath = temp.newFolder().getAbsolutePath();
HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);
TableIdentifier testTable = TableIdentifier.of("db", "tbl1");
catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned());
AssertHelpers.assertThrows("should throw exception", UnsupportedOperationException.class, "Cannot rename Hadoop tables", () -> {
catalog.renameTable(testTable, TableIdentifier.of("db", "tbl2"));
});
}
19
Source : TestHadoopCatalog.java
with Apache License 2.0
from apache
@Test
public void testVersionHintFileMissingMetadata() throws Exception {
addVersionsToTable(table);
HadoopTableOperations tableOperations = (HadoopTableOperations) TABLES.newTableOps(tableLocation);
long secondSnapshotId = table.currentSnapshot().snapshotId();
// Write old data to confirm that we are writing the correct file
FileIO io = table.io();
io.deleteFile(versionHintFile.getPath());
// Remove the first version file, and see if we can recover
io.deleteFile(tableOperations.getMetadataFile(1).toString());
// Check the result of the findVersion(), and load the table and check the current snapshotId
Assert.assertEquals(3, tableOperations.findVersion());
Assert.assertEquals(secondSnapshotId, TABLES.load(tableLocation).currentSnapshot().snapshotId());
// Remove all the version files, and see if we can recover. Hint... not :)
io.deleteFile(tableOperations.getMetadataFile(2).toString());
io.deleteFile(tableOperations.getMetadataFile(3).toString());
// Check that we got 0 findVersion, and a NoSuchTableException is thrown when trying to load the table
Assert.assertEquals(0, tableOperations.findVersion());
AssertHelpers.assertThrows("Should not be able to find the table", NoSuchTableException.class, "Table does not exist", () -> TABLES.load(tableLocation));
}
19
Source : TestAvroNameMapping.java
with Apache License 2.0
from apache
@Test
public void testMissingRequiredFields() {
Schema writeSchema = new Schema(Types.NestedField.required(19, "x", Types.IntegerType.get()), Types.NestedField.optional(18, "y", Types.IntegerType.get()));
Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
record.put("x", 1);
record.put("y", 2);
// table mapping not projecting a required field 'x'
NameMapping nameMapping = MappingUtil.create(new Schema(Types.NestedField.optional(18, "y", Types.IntegerType.get())));
Schema readSchema = writeSchema;
replacedertHelpers.replacedertThrows("Missing required field in nameMapping", IllegalArgumentException.clreplaced, "Missing required field: x", // In this case, pruneColumns result is an empty record
() -> writeAndRead(writeSchema, readSchema, record, nameMapping));
}
19
Source : GlueCatalogTest.java
with Apache License 2.0
from apache
@Test
public void constructor_emptyWarehousePath() {
replacedertHelpers.replacedertThrows("warehouse path cannot be null", IllegalArgumentException.clreplaced, "Cannot initialize GlueCatalog because warehousePath must not be null", () -> {
GlueCatalog catalog = new GlueCatalog();
catalog.initialize(CATALOG_NAME, null, new AwsProperties(), glue, LockManagers.defaultLockManager(), null);
});
}
19
Source : GlueCatalogTableTest.java
with Apache License 2.0
from apache
@Test
public void testCreateTableBadName() {
String namespace = createNamespace();
replacedertHelpers.replacedertThrows("should not create table with bad name", IllegalArgumentException.clreplaced, "Invalid table identifier", () -> glueCatalog.createTable(TableIdentifier.of(namespace, "table-1"), schema, parreplacedionSpec));
}
19
Source : GlueCatalogTableTest.java
with Apache License 2.0
from apache
@Test
public void testRenameTable_failToDeleteOldTable() {
String namespace = createNamespace();
String tableName = createTable(namespace);
TableIdentifier id = TableIdentifier.of(namespace, tableName);
Table table = glueCatalog.loadTable(id);
// delete the old table metadata, so that drop old table will fail
String newTableName = tableName + "_2";
glue.updateTable(UpdateTableRequest.builder().databaseName(namespace).tableInput(TableInput.builder().name(tableName).parameters(Maps.newHashMap()).build()).build());
replacedertHelpers.replacedertThrows("should fail to rename", ValidationException.clreplaced, "Input Glue table is not an iceberg table", () -> glueCatalog.renameTable(TableIdentifier.of(namespace, tableName), TableIdentifier.of(namespace, newTableName)));
replacedertHelpers.replacedertThrows("renamed table should be deleted", EnreplacedyNotFoundException.clreplaced, "not found", () -> glue.getTable(GetTableRequest.builder().databaseName(namespace).name(newTableName).build()));
}
19
Source : GlueCatalogTableTest.java
with Apache License 2.0
from apache
@Test
public void testCreateTableDuplicate() {
String namespace = createNamespace();
String tableName = createTable(namespace);
replacedertHelpers.replacedertThrows("should not create table with the same name", AlreadyExistsException.clreplaced, "Table already exists", () -> glueCatalog.createTable(TableIdentifier.of(namespace, tableName), schema, parreplacedionSpec));
}
19
Source : DynamoLockManagerTest.java
with Apache License 2.0
from apache
@Test
public void testTableCreationFailure() {
DynamoDbClient dynamo2 = Mockito.mock(DynamoDbClient.class);
Mockito.doThrow(ResourceNotFoundException.class).when(dynamo2).describeTable(Mockito.any(DescribeTableRequest.class));
AssertHelpers.assertThrows("should fail to initialize the lock manager", IllegalStateException.class, "Cannot find Dynamo table", () -> new DynamoLockManager(dynamo2, lockTableName));
}
19
Source : TestBucketing.java
with Apache License 2.0
from apache
@Test
public void testVerifiedIllegalNumBuckets() {
replacedertHelpers.replacedertThrows("Should fail if numBucket is less than or equal to zero", IllegalArgumentException.clreplaced, "Invalid number of buckets: 0 (must be > 0)", () -> Bucket.get(Types.IntegerType.get(), 0));
}
19
Source : TestExpressionHelpers.java
with Apache License 2.0
from apache
private void assertInvalidateNaNThrows(Callable<UnboundPredicate<Double>> callable) {
AssertHelpers.assertThrows("Should invalidate NaN input", IllegalArgumentException.class, "Cannot create expression literal from NaN", callable);
}
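For context, a hedged sketch of how a helper like this might be invoked, assuming the factory methods on org.apache.iceberg.expressions.Expressions; the column name "measurement" is purely illustrative:
// hypothetical call sites for the helper above; lessThan/greaterThan return UnboundPredicate<Double>
assertInvalidateNaNThrows(() -> Expressions.lessThan("measurement", Double.NaN));
assertInvalidateNaNThrows(() -> Expressions.greaterThan("measurement", Double.NaN));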
18
Source : TestUpdate.java
with Apache License 2.0
from apache
@Test
public void testUpdateWithNotInSubqueryNotSupported() {
createAndInitTable("id INT, dep STRING");
createOrReplaceView("updated_id", Arrays.asList(-1, -2, null), Encoders.INT());
replacedertHelpers.replacedertThrows("Should complain about NOT IN subquery", replacedysisException.clreplaced, "Null-aware predicate subqueries are not currently supported", () -> sql("UPDATE %s SET id = -1 WHERE id NOT IN (SELECT * FROM updated_id)", tableName));
}
18
Source : TestSetCurrentSnapshotProcedure.java
with Apache License 2.0
from apache
@Test
public void testInvalidRollbackToSnapshotCases() {
replacedertHelpers.replacedertThrows("Should not allow mixed args", replacedysisException.clreplaced, "Named and positional arguments cannot be mixed", () -> sql("CALL %s.system.set_current_snapshot(namespace => 'n1', table => 't', 1L)", catalogName));
replacedertHelpers.replacedertThrows("Should not resolve procedures in arbitrary namespaces", NoSuchProcedureException.clreplaced, "not found", () -> sql("CALL %s.custom.set_current_snapshot('n', 't', 1L)", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.set_current_snapshot('t')", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.set_current_snapshot(1L)", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.set_current_snapshot(snapshot_id => 1L)", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.set_current_snapshot(table => 't')", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls with invalid arg types", replacedysisException.clreplaced, "Wrong arg type for snapshot_id: cannot cast", () -> sql("CALL %s.system.set_current_snapshot('t', 2.2)", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls with empty table identifier", IllegalArgumentException.clreplaced, "Cannot handle an empty identifier", () -> sql("CALL %s.system.set_current_snapshot('', 1L)", catalogName));
}
18
Source : TestSetCurrentSnapshotProcedure.java
with Apache License 2.0
from apache
@Test
public void testSetCurrentSnapshotToInvalidSnapshot() {
sql("CREATE TABLE %s (id bigint NOT NULL, data string) USING iceberg", tableName);
Namespace namespace = tableIdent.namespace();
String tableName = tableIdent.name();
replacedertHelpers.replacedertThrows("Should reject invalid snapshot id", ValidationException.clreplaced, "Cannot roll back to unknown snapshot id", () -> sql("CALL %s.system.set_current_snapshot('%s', -1L)", catalogName, tableIdent));
}
18
Source : TestRollbackToTimestampProcedure.java
with Apache License 2.0
from apache
@Test
public void testInvalidRollbackToTimestampCases() {
String timestamp = "TIMESTAMP '2007-12-03T10:15:30'";
replacedertHelpers.replacedertThrows("Should not allow mixed args", replacedysisException.clreplaced, "Named and positional arguments cannot be mixed", () -> sql("CALL %s.system.rollback_to_timestamp(namespace => 'n1', 't', %s)", catalogName, timestamp));
replacedertHelpers.replacedertThrows("Should not resolve procedures in arbitrary namespaces", NoSuchProcedureException.clreplaced, "not found", () -> sql("CALL %s.custom.rollback_to_timestamp('n', 't', %s)", catalogName, timestamp));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.rollback_to_timestamp('t')", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.rollback_to_timestamp(timestamp => %s)", catalogName, timestamp));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.rollback_to_timestamp(table => 't')", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls with extra args", replacedysisException.clreplaced, "Too many arguments", () -> sql("CALL %s.system.rollback_to_timestamp('n', 't', %s, 1L)", catalogName, timestamp));
replacedertHelpers.replacedertThrows("Should reject calls with invalid arg types", replacedysisException.clreplaced, "Wrong arg type for timestamp: cannot cast", () -> sql("CALL %s.system.rollback_to_timestamp('t', 2.2)", catalogName));
}
18
Source : TestRollbackToSnapshotProcedure.java
with Apache License 2.0
from apache
@Test
public void testInvalidRollbackToSnapshotCases() {
replacedertHelpers.replacedertThrows("Should not allow mixed args", replacedysisException.clreplaced, "Named and positional arguments cannot be mixed", () -> sql("CALL %s.system.rollback_to_snapshot(namespace => 'n1', table => 't', 1L)", catalogName));
replacedertHelpers.replacedertThrows("Should not resolve procedures in arbitrary namespaces", NoSuchProcedureException.clreplaced, "not found", () -> sql("CALL %s.custom.rollback_to_snapshot('n', 't', 1L)", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.rollback_to_snapshot('t')", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.rollback_to_snapshot(1L)", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.rollback_to_snapshot(table => 't')", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls with invalid arg types", replacedysisException.clreplaced, "Wrong arg type for snapshot_id: cannot cast", () -> sql("CALL %s.system.rollback_to_snapshot('t', 2.2)", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls with empty table identifier", IllegalArgumentException.clreplaced, "Cannot handle an empty identifier", () -> sql("CALL %s.system.rollback_to_snapshot('', 1L)", catalogName));
}
18
Source : TestRollbackToSnapshotProcedure.java
with Apache License 2.0
from apache
@Test
public void testRollbackToInvalidSnapshot() {
sql("CREATE TABLE %s (id bigint NOT NULL, data string) USING iceberg", tableName);
replacedertHelpers.replacedertThrows("Should reject invalid snapshot id", ValidationException.clreplaced, "Cannot roll back to unknown snapshot id", () -> sql("CALL %s.system.rollback_to_snapshot('%s', -1L)", catalogName, tableIdent));
}
18
Source : TestRewriteManifestsProcedure.java
with Apache License 2.0
from apache
@Test
public void testInvalidRewriteManifestsCases() {
replacedertHelpers.replacedertThrows("Should not allow mixed args", replacedysisException.clreplaced, "Named and positional arguments cannot be mixed", () -> sql("CALL %s.system.rewrite_manifests('n', table => 't')", catalogName));
replacedertHelpers.replacedertThrows("Should not resolve procedures in arbitrary namespaces", NoSuchProcedureException.clreplaced, "not found", () -> sql("CALL %s.custom.rewrite_manifests('n', 't')", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls without all required args", replacedysisException.clreplaced, "Missing required parameters", () -> sql("CALL %s.system.rewrite_manifests()", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls with invalid arg types", replacedysisException.clreplaced, "Wrong arg type", () -> sql("CALL %s.system.rewrite_manifests('n', 2.2)", catalogName));
replacedertHelpers.replacedertThrows("Should reject duplicate arg names name", replacedysisException.clreplaced, "Duplicate procedure argument: table", () -> sql("CALL %s.system.rewrite_manifests(table => 't', tAbLe => 't')", catalogName));
replacedertHelpers.replacedertThrows("Should reject calls with empty table identifier", IllegalArgumentException.clreplaced, "Cannot handle an empty identifier", () -> sql("CALL %s.system.rewrite_manifests('')", catalogName));
}