org.apache.iceberg.catalog.TableIdentifier.of()

Here are examples of the Java API org.apache.iceberg.catalog.TableIdentifier.of(), taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

138 Examples
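
Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of the two overloads these snippets rely on: one takes a varargs list of name parts, where every part except the last becomes the namespace, and the other takes an explicit Namespace plus a table name.

import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;

public class TableIdentifierDemo {

    public static void main(String[] args) {
        // Varargs overload: leading parts form the namespace, the last part is the table name.
        TableIdentifier byParts = TableIdentifier.of("db", "ns1", "tbl");

        // Namespace overload: the equivalent identifier built from an explicit Namespace.
        TableIdentifier byNamespace = TableIdentifier.of(Namespace.of("db", "ns1"), "tbl");

        System.out.println(byParts);                     // db.ns1.tbl
        System.out.println(byParts.namespace());         // db.ns1
        System.out.println(byParts.name());              // tbl
        System.out.println(byParts.equals(byNamespace)); // true
    }
}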

19 Source : Spark3Util.java
with Apache License 2.0
from apache

public static TableIdentifier identifierToTableIdentifier(Identifier identifier) {
    return TableIdentifier.of(Namespace.of(identifier.namespace()), identifier.name());
}

19 Source : TestSparkReaderDeletes.java
with Apache License 2.0
from apache

@Override
protected void dropTable(String name) {
    catalog.dropTable(TableIdentifier.of("default", name));
}

19 Source : TestBranchVisibility.java
with Apache License 2.0
from apache

@Test
public void testCatalogWithTableNames() throws NessieNotFoundException {
    updateSchema(testCatalog, tableIdentifier2);
    String mainHash = tree.getReferenceByName("main").getHash();
    // asking for table@branch gives expected regardless of catalog
    Assert.assertEquals(metadataLocation(catalog, TableIdentifier.of("test-ns", "table1@test")), metadataLocation(testCatalog, tableIdentifier1));
    // asking for table@branch#hash gives expected regardless of catalog
    Assert.assertEquals(metadataLocation(catalog, TableIdentifier.of("test-ns", "table1@" + mainHash)), metadataLocation(testCatalog, tableIdentifier1));
}

19 Source : TestFlinkCatalogTable.java
with Apache License 2.0
from apache

private Table table(String name) {
    return validationCatalog.loadTable(TableIdentifier.of(icebergNamespace, name));
}

19 Source : GlueTestBase.java
with Apache License 2.0
from apache

public static String createTable(String namespace, String tableName) {
    glueCatalog.createTable(TableIdentifier.of(namespace, tableName), schema, partitionSpec);
    return tableName;
}

19 Source : GlueCatalogTableTest.java
with Apache License 2.0
from apache

@Test
public void testTableExists() {
    String namespace = createNamespace();
    String tableName = createTable(namespace);
    Assert.assertTrue(glueCatalog.tableExists(TableIdentifier.of(namespace, tableName)));
}

18 Source : TestRemoveOrphanFilesAction.java
with Apache License 2.0
from apache

@Test
public void testHiveCatalogTable() throws IOException {
    Table table = catalog.createTable(TableIdentifier.of("default", "hivetestorphan"), SCHEMA, SPEC, tableLocation, Maps.newHashMap());
    List<ThreeColumnRecord> records = Lists.newArrayList(new ThreeColumnRecord(1, "AAAAAAAAAA", "AAAA"));
    Dataset<Row> df = spark.createDataFrame(records, ThreeColumnRecord.class).coalesce(1);
    df.select("c1", "c2", "c3").write().format("iceberg").mode("append").save("default.hivetestorphan");
    String location = table.location().replaceFirst("file:", "");
    new File(location + "/data/trashfile").createNewFile();
    List<String> results = Actions.forTable(table).removeOrphanFiles().olderThan(System.currentTimeMillis() + 1000).execute();
    replacedert.replacedertTrue("trash file should be removed", results.contains("file:" + location + "data/trashfile"));
}

18 Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache

@Test
public void testCreateTableWithColumnSpecificationHierarchy() {
    TableIdentifier identifier = TableIdentifier.of("default", "customers");
    shell.executeStatement("CREATE EXTERNAL TABLE customers (" + "id BIGINT, name STRING, " + "employee_info STRUCT < employer: STRING, id: BIGINT, address: STRING >, " + "places_lived ARRAY < STRUCT <street: STRING, city: STRING, country: STRING >>, " + "memorable_moments MAP < STRING, STRUCT < year: INT, place: STRING, details: STRING >>, " + "current_address STRUCT < street_address: STRUCT " + "<street_number: INT, street_name: STRING, street_type: STRING>, country: STRING, postal_code: STRING >) " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier));
    // Check the Iceberg table data
    org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
    Assert.assertEquals(COMPLEX_SCHEMA.asStruct(), icebergTable.schema().asStruct());
}

18 Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache

@Test
public void testCreateTableWithoutSpec() {
    TableIdentifier identifier = TableIdentifier.of("default", "customers");
    shell.executeStatement("CREATE EXTERNAL TABLE customers " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier) + "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + SchemaParser.toJson(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) + "')");
    // Check the Iceberg table partition data
    org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
    Assert.assertEquals(PartitionSpec.unpartitioned(), icebergTable.spec());
}

18 Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache

@Test
public void testCreateTableError() {
    TableIdentifier identifier = TableIdentifier.of("default", "withShell2");
    // Wrong schema
    replacedertHelpers.replacedertThrows("should throw exception", IllegalArgumentException.clreplaced, "Unrecognized token 'WrongSchema'", () -> {
        shell.executeStatement("CREATE EXTERNAL TABLE withShell2 " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier) + "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='WrongSchema')");
    });
    // Missing schema, we try to get the schema from the table and fail
    replacedertHelpers.replacedertThrows("should throw exception", IllegalArgumentException.clreplaced, "Please provide ", () -> {
        shell.executeStatement("CREATE EXTERNAL TABLE withShell2 " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier));
    });
    if (!testTables.locationForCreateTableSQL(identifier).isEmpty()) {
        // Only test this if the location is required
        replacedertHelpers.replacedertThrows("should throw exception", IllegalArgumentException.clreplaced, "Table location not set", () -> {
            shell.executeStatement("CREATE EXTERNAL TABLE withShell2 " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + SchemaParser.toJson(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) + "')");
        });
    }
}

18 Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache

@Test
public void testCreateTableWithUnpartitionedSpec() {
    TableIdentifier identifier = TableIdentifier.of("default", "customers");
    // We need the location for HadoopTable based tests only
    shell.executeStatement("CREATE EXTERNAL TABLE customers " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier) + "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + SchemaParser.toJson(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) + "', " + "'" + InputFormatConfig.PARTITION_SPEC + "'='" + PartitionSpecParser.toJson(PartitionSpec.unpartitioned()) + "')");
    // Check the Iceberg table partition data
    org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
    Assert.assertEquals(SPEC, icebergTable.spec());
}

18 Source : TestFlinkReaderDeletesBase.java
with Apache License 2.0
from apache

@Override
protected void dropTable(String name) {
    catalog.dropTable(TableIdentifier.of(databaseName, name));
}

18 Source : FlinkCatalog.java
with Apache License 2.0
from apache

TableIdentifier toIdentifier(ObjectPath path) {
    return TableIdentifier.of(toNamespace(path.getDatabaseName()), path.getObjectName());
}

18 Source : GlueCatalogTableTest.java
with Apache License 2.0
from apache

@Test
public void testCreateTableBadName() {
    String namespace = createNamespace();
    replacedertHelpers.replacedertThrows("should not create table with bad name", IllegalArgumentException.clreplaced, "Invalid table identifier", () -> glueCatalog.createTable(TableIdentifier.of(namespace, "table-1"), schema, parreplacedionSpec));
}

18 Source : GlueCatalogTableTest.java
with Apache License 2.0
from apache

@Test
public void testCreateTableDuplicate() {
    String namespace = createNamespace();
    String tableName = createTable(namespace);
    replacedertHelpers.replacedertThrows("should not create table with the same name", AlreadyExistsException.clreplaced, "Table already exists", () -> glueCatalog.createTable(TableIdentifier.of(namespace, tableName), schema, parreplacedionSpec));
}

17 Source : TestIcebergSourceTablesBase.java
with Apache License 2.0
from apache

@Test
public void testCountEntriesTable() {
    TableIdentifier tableIdentifier = TableIdentifier.of("db", "count_entries_test");
    createTable(tableIdentifier, SCHEMA, PartitionSpec.unpartitioned());
    // init load
    List<SimpleRecord> records = Lists.newArrayList(new SimpleRecord(1, "1"));
    Dataset<Row> inputDf = spark.createDataFrame(records, SimpleRecord.class);
    inputDf.select("id", "data").write().format("iceberg").mode("append").save(loadLocation(tableIdentifier));
    final int expectedEntryCount = 1;
    // count entries
    Assert.assertEquals("Count should return " + expectedEntryCount, expectedEntryCount, spark.read().format("iceberg").load(loadLocation(tableIdentifier, "entries")).count());
    // count all_entries
    Assert.assertEquals("Count should return " + expectedEntryCount, expectedEntryCount, spark.read().format("iceberg").load(loadLocation(tableIdentifier, "all_entries")).count());
}

17 Source : NessieUtil.java
with Apache License 2.0
from apache

static TableIdentifier toIdentifier(EntriesResponse.Entry entry) {
    List<String> elements = entry.getName().getElements();
    return TableIdentifier.of(elements.toArray(new String[elements.size()]));
}

17 Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache

@Test
public void testCreateTableWithAllSupportedTypes() {
    TableIdentifier identifier = TableIdentifier.of("default", "all_types");
    Schema allSupportedSchema = new Schema(optional(1, "t_float", Types.FloatType.get()), optional(2, "t_double", Types.DoubleType.get()), optional(3, "t_boolean", Types.BooleanType.get()), optional(4, "t_int", Types.IntegerType.get()), optional(5, "t_bigint", Types.LongType.get()), optional(6, "t_binary", Types.BinaryType.get()), optional(7, "t_string", Types.StringType.get()), optional(8, "t_timestamp", Types.TimestampType.withoutZone()), optional(9, "t_date", Types.DateType.get()), optional(10, "t_decimal", Types.DecimalType.of(3, 2)));
    // Intentionally adding some mixed letters to test that we handle them correctly
    shell.executeStatement("CREATE EXTERNAL TABLE all_types (" + "t_Float FLOaT, t_dOuble DOUBLE, t_boolean BOOLEAN, t_int INT, t_bigint BIGINT, t_binary BINARY, " + "t_string STRING, t_timestamp TIMESTAMP, t_date DATE, t_decimal DECIMAL(3,2)) " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier));
    // Check the Iceberg table data
    org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
    Assert.assertEquals(allSupportedSchema.asStruct(), icebergTable.schema().asStruct());
}

17 Source : HiveCatalog.java
with Apache License 2.0
from apache

private TableIdentifier removeCatalogName(TableIdentifier to) {
    if (isValidIdentifier(to)) {
        return to;
    }
    // check if the identifier includes the catalog name and remove it
    if (to.namespace().levels().length == 2 && name().equalsIgnoreCase(to.namespace().level(0))) {
        return TableIdentifier.of(Namespace.of(to.namespace().level(1)), to.name());
    }
    // return the original unmodified
    return to;
}

17 Source : HiveCatalog.java
with Apache License 2.0
from apache

@Override
public List<TableIdentifier> listTables(Namespace namespace) {
    Preconditions.checkArgument(isValidateNamespace(namespace), "Missing database in namespace: %s", namespace);
    String database = namespace.level(0);
    try {
        List<String> tableNames = clients.run(client -> client.getAllTables(database));
        List<Table> tableObjects = clients.run(client -> client.getTableObjectsByName(database, tableNames));
        List<TableIdentifier> tableIdentifiers = tableObjects.stream().filter(table -> table.getParameters() == null ? false : BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(table.getParameters().get(BaseMetastoreTableOperations.TABLE_TYPE_PROP))).map(table -> TableIdentifier.of(namespace, table.getTableName())).collect(Collectors.toList());
        LOG.debug("Listing of namespace: {} resulted in the following tables: {}", namespace, tableIdentifiers);
        return tableIdentifiers;
    } catch (UnknownDBException e) {
        throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
    } catch (TException e) {
        throw new RuntimeException("Failed to list all tables under namespace " + namespace, e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted in call to listTables", e);
    }
}

17 Source : TestFlinkTableSink.java
with Apache License 2.0
from apache

@Test
public void testInsertIntoPartition() throws Exception {
    String tableName = "test_insert_into_partition";
    sql("CREATE TABLE %s(id INT, data VARCHAR) PARTITIONED BY (data) WITH ('write.format.default'='%s')", tableName, format.name());
    Table partitionedTable = validationCatalog.loadTable(TableIdentifier.of(icebergNamespace, tableName));
    // Full partition.
    sql("INSERT INTO %s PARTITION (data='a') SELECT 1", tableName);
    sql("INSERT INTO %s PARTITION (data='a') SELECT 2", tableName);
    sql("INSERT INTO %s PARTITION (data='b') SELECT 3", tableName);
    SimpleDataUtil.assertTableRecords(partitionedTable, Lists.newArrayList(SimpleDataUtil.createRecord(1, "a"), SimpleDataUtil.createRecord(2, "a"), SimpleDataUtil.createRecord(3, "b")));
    // Partial partition.
    sql("INSERT INTO %s SELECT 4, 'c'", tableName);
    sql("INSERT INTO %s SELECT 5, 'd'", tableName);
    SimpleDataUtil.assertTableRecords(partitionedTable, Lists.newArrayList(SimpleDataUtil.createRecord(1, "a"), SimpleDataUtil.createRecord(2, "a"), SimpleDataUtil.createRecord(3, "b"), SimpleDataUtil.createRecord(4, "c"), SimpleDataUtil.createRecord(5, "d")));
    sql("DROP TABLE IF EXISTS %s.%s", flinkDatabase, tableName);
}

17 Source : TestFlinkCatalogDatabase.java
with Apache License 2.0
from apache

@Test
public void testDropNonEmptyNamespace() {
    replacedume.replacedumeFalse("Hadoop catalog throws IOException: Directory is not empty.", isHadoopCatalog);
    replacedert.replacedertFalse("Namespace should not already exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
    sql("CREATE DATABASE %s", flinkDatabase);
    validationCatalog.createTable(TableIdentifier.of(icebergNamespace, "tl"), new Schema(Types.NestedField.optional(0, "id", Types.LongType.get())));
    replacedert.replacedertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
    replacedert.replacedertTrue("Table should exist", validationCatalog.tableExists(TableIdentifier.of(icebergNamespace, "tl")));
    replacedertHelpers.replacedertThrowsCause("Should fail if trying to delete a non-empty database", DatabaseNotEmptyException.clreplaced, String.format("Database %s in catalog %s is not empty.", DATABASE, catalogName), () -> sql("DROP DATABASE %s", flinkDatabase));
    sql("DROP TABLE %s.tl", flinkDatabase);
}

17 Source : TestHadoopCatalog.java
with Apache License 2.0
from apache

@Test
public void testDropTable() throws Exception {
    Configuration conf = new Configuration();
    String warehousePath = temp.newFolder().getAbsolutePath();
    HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);
    TableIdentifier testTable = TableIdentifier.of("db", "ns1", "ns2", "tbl");
    catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned());
    String metaLocation = catalog.defaultWarehouseLocation(testTable);
    FileSystem fs = Util.getFs(new Path(metaLocation), conf);
    Assert.assertTrue(fs.isDirectory(new Path(metaLocation)));
    catalog.dropTable(testTable);
    Assert.assertFalse(fs.isDirectory(new Path(metaLocation)));
}

17 Source : TestHadoopCatalog.java
with Apache License 2.0
from apache

@Test
public void testTableBuilderWithLocation() throws Exception {
    Configuration conf = new Configuration();
    String warehousePath = temp.newFolder().getAbsolutePath();
    HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);
    TableIdentifier tableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl");
    replacedertHelpers.replacedertThrows("Should reject a custom location", IllegalArgumentException.clreplaced, "Cannot set a custom location for a path-based table", () -> catalog.buildTable(tableIdent, SCHEMA).withLocation("custom").create());
    replacedertHelpers.replacedertThrows("Should reject a custom location", IllegalArgumentException.clreplaced, "Cannot set a custom location for a path-based table", () -> catalog.buildTable(tableIdent, SCHEMA).withLocation("custom").createTransaction());
    replacedertHelpers.replacedertThrows("Should reject a custom location", IllegalArgumentException.clreplaced, "Cannot set a custom location for a path-based table", () -> catalog.buildTable(tableIdent, SCHEMA).withLocation("custom").createOrReplaceTransaction());
}

17 Source : TestHadoopCatalog.java
with Apache License 2.0
from apache

@Test
public void testNamespaceExists() throws IOException {
    Configuration conf = new Configuration();
    String warehousePath = temp.newFolder().getAbsolutePath();
    HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);
    TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "metadata");
    TableIdentifier tbl2 = TableIdentifier.of("db", "ns2", "ns3", "tbl2");
    TableIdentifier tbl3 = TableIdentifier.of("db", "ns3", "tbl4");
    TableIdentifier tbl4 = TableIdentifier.of("db", "metadata");
    Lists.newArrayList(tbl1, tbl2, tbl3, tbl4).forEach(t -> catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned()));
    Assert.assertTrue("Should true to namespace exist", catalog.namespaceExists(Namespace.of("db", "ns1", "ns2")));
    Assert.assertTrue("Should false to namespace doesn't exist", !catalog.namespaceExists(Namespace.of("db", "db2", "ns2")));
}

17 Source : TestHadoopCatalog.java
with Apache License 2.0
from apache

@Test
public void testBasicCatalog() throws Exception {
    Configuration conf = new Configuration();
    String warehousePath = temp.newFolder().getAbsolutePath();
    HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);
    TableIdentifier testTable = TableIdentifier.of("db", "ns1", "ns2", "tbl");
    catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned());
    String metaLocation = catalog.defaultWarehouseLocation(testTable);
    FileSystem fs = Util.getFs(new Path(metaLocation), conf);
    Assert.assertTrue(fs.isDirectory(new Path(metaLocation)));
    catalog.dropTable(testTable);
    Assert.assertFalse(fs.isDirectory(new Path(metaLocation)));
}

17 Source : TestHadoopCatalog.java
with Apache License 2.0
from apache

@Test
public void testRenameTable() throws Exception {
    Configuration conf = new Configuration();
    String warehousePath = temp.newFolder().getAbsolutePath();
    HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);
    TableIdentifier testTable = TableIdentifier.of("db", "tbl1");
    catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned());
    AssertHelpers.assertThrows("should throw exception", UnsupportedOperationException.class, "Cannot rename Hadoop tables", () -> {
        catalog.renameTable(testTable, TableIdentifier.of("db", "tbl2"));
    });
}

16 Source : TestNamespaceSQL.java
with Apache License 2.0
from apache

@Test
public void testDropNonEmptyNamespace() {
    replacedume.replacedumeFalse("Session catalog has flaky behavior", "spark_catalog".equals(catalogName));
    replacedert.replacedertFalse("Namespace should not already exist", validationNamespaceCatalog.namespaceExists(NS));
    sql("CREATE NAMESPACE %s", fullNamespace);
    sql("CREATE TABLE %s.table (id bigint) USING iceberg", fullNamespace);
    replacedert.replacedertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(NS));
    replacedert.replacedertTrue("Table should exist", validationCatalog.tableExists(TableIdentifier.of(NS, "table")));
    replacedertHelpers.replacedertThrows("Should fail if trying to delete a non-empty namespace", SparkException.clreplaced, "non-empty namespace", () -> sql("DROP NAMESPACE %s", fullNamespace));
    sql("DROP TABLE %s.table", fullNamespace);
}

16 Source : TestSparkReaderDeletes.java
with Apache License 2.0
from apache

@Override
public StructLikeSet rowSet(String name, Table table, String... columns) {
    Dataset<Row> df = spark.read().format("iceberg").load(TableIdentifier.of("default", name).toString()).selectExpr(columns);
    Types.StructType projection = table.schema().select(columns).asStruct();
    StructLikeSet set = StructLikeSet.create(projection);
    df.collectAsList().forEach(row -> {
        SparkStructLike rowWrapper = new SparkStructLike(projection);
        set.add(rowWrapper.wrap(row));
    });
    return set;
}

16 Source : TestIcebergSourceTablesBase.java
with Apache License 2.0
from apache

@Test
public void testEntriesTableWithSnapshotIdInheritance() throws Exception {
    spark.sql("DROP TABLE IF EXISTS parquet_table");
    TableIdentifier tableIdentifier = TableIdentifier.of("db", "entries_inheritance_test");
    PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("id").build();
    Table table = createTable(tableIdentifier, SCHEMA, spec);
    table.updateProperties().set(TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED, "true").commit();
    spark.sql(String.format("CREATE TABLE parquet_table (data string, id int) " + "USING parquet PARTITIONED BY (id) LOCATION '%s'", temp.newFolder()));
    List<SimpleRecord> records = Lists.newArrayList(new SimpleRecord(1, "a"), new SimpleRecord(2, "b"));
    Dataset<Row> inputDF = spark.createDataFrame(records, SimpleRecord.class);
    inputDF.select("data", "id").write().mode("overwrite").insertInto("parquet_table");
    try {
        String stagingLocation = table.location() + "/metadata";
        SparkTableUtil.importSparkTable(spark, new org.apache.spark.sql.catalyst.TableIdentifier("parquet_table"), table, stagingLocation);
        List<Row> actual = spark.read().format("iceberg").load(loadLocation(tableIdentifier, "entries")).select("sequence_number", "snapshot_id", "data_file").collectAsList();
        table.refresh();
        long snapshotId = table.currentSnapshot().snapshotId();
        replacedert.replacedertEquals("Entries table should have 2 rows", 2, actual.size());
        replacedert.replacedertEquals("Sequence number must match", 0, actual.get(0).getLong(0));
        replacedert.replacedertEquals("Snapshot id must match", snapshotId, actual.get(0).getLong(1));
        replacedert.replacedertEquals("Sequence number must match", 0, actual.get(1).getLong(0));
        replacedert.replacedertEquals("Snapshot id must match", snapshotId, actual.get(1).getLong(1));
    } finally {
        spark.sql("DROP TABLE parquet_table");
    }
}

16 Source : TestIcebergSourceTablesBase.java
with Apache License 2.0
from apache

@Test
public synchronized void testTablesSupport() {
    TableIdentifier tableIdentifier = TableIdentifier.of("db", "table");
    createTable(tableIdentifier, SCHEMA, PartitionSpec.unpartitioned());
    List<SimpleRecord> expectedRecords = Lists.newArrayList(new SimpleRecord(1, "1"), new SimpleRecord(2, "2"), new SimpleRecord(3, "3"));
    Dataset<Row> inputDf = spark.createDataFrame(expectedRecords, SimpleRecord.class);
    inputDf.select("id", "data").write().format("iceberg").mode(SaveMode.Append).save(loadLocation(tableIdentifier));
    Dataset<Row> resultDf = spark.read().format("iceberg").load(loadLocation(tableIdentifier));
    List<SimpleRecord> actualRecords = resultDf.orderBy("id").as(Encoders.bean(SimpleRecord.class)).collectAsList();
    Assert.assertEquals("Records should match", expectedRecords, actualRecords);
}

16 Source : TestIcebergSourceHiveTables.java
with Apache License 2.0
from apache

@Override
public Table loadTable(TableIdentifier ident, String entriesSuffix) {
    TableIdentifier identifier = TableIdentifier.of(ident.namespace().level(0), ident.name(), entriesSuffix);
    return TestIcebergSourceHiveTables.catalog.loadTable(identifier);
}

16 Source : TestTables.java
with Apache License 2.0
from apache

/**
 * Creates a partitioned Hive test table using Hive SQL. The table will be in the 'default' database.
 * The table will be populated with the provided List of {@link Record}s using a Hive insert statement.
 * @param shell The HiveShell used for Hive table creation
 * @param tableName The name of the test table
 * @param schema The schema used for the table creation
 * @param spec The partition specification for the table
 * @param fileFormat The file format used for writing the data
 * @param records The records with which the table is populated
 * @return The created table
 * @throws IOException If there is an error writing data
 */
public Table createTable(TestHiveShell shell, String tableName, Schema schema, PartitionSpec spec, FileFormat fileFormat, List<Record> records) {
    TableIdentifier identifier = TableIdentifier.of("default", tableName);
    shell.executeStatement("CREATE EXTERNAL TABLE " + identifier + " STORED BY '" + HiveIcebergStorageHandler.class.getName() + "' " + locationForCreateTableSQL(identifier) + "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + SchemaParser.toJson(schema) + "', " + "'" + InputFormatConfig.PARTITION_SPEC + "'='" + PartitionSpecParser.toJson(spec) + "', " + "'" + TableProperties.DEFAULT_FILE_FORMAT + "'='" + fileFormat + "')");
    if (records != null && !records.isEmpty()) {
        StringBuilder query = new StringBuilder().append("INSERT INTO " + identifier + " VALUES ");
        records.forEach(record -> {
            query.append("(");
            query.append(record.struct().fields().stream().map(field -> getStringValueForInsert(record.getField(field.name()), field.type())).collect(Collectors.joining(",")));
            query.append("),");
        });
        query.setLength(query.length() - 1);
        shell.executeStatement(query.toString());
    }
    return loadTable(identifier);
}

16 Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache

@Test
public void testCreateTableWithNotSupportedTypes() {
    TableIdentifier identifier = TableIdentifier.of("default", "not_supported_types");
    // Can not create INTERVAL types from normal create table, so leave them out from this test
    Map<String, Type> notSupportedTypes = ImmutableMap.of("TINYINT", Types.IntegerType.get(), "SMALLINT", Types.IntegerType.get(), "VARCHAR(1)", Types.StringType.get(), "CHAR(1)", Types.StringType.get());
    for (String notSupportedType : notSupportedTypes.keySet()) {
        replacedertHelpers.replacedertThrows("should throw exception", IllegalArgumentException.clreplaced, "Unsupported Hive type", () -> {
            shell.executeStatement("CREATE EXTERNAL TABLE not_supported_types " + "(not_supported " + notSupportedType + ") " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier));
        });
    }
}

16 Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache

@Test
public void testCreateTableWithoutColumnComments() {
    TableIdentifier identifier = TableIdentifier.of("default", "without_comment_table");
    shell.executeStatement("CREATE EXTERNAL TABLE without_comment_table (" + "t_int INT,  " + "t_string STRING) " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier));
    org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
    List<Object[]> rows = shell.executeStatement("DESCRIBE default.without_comment_table");
    Assert.assertEquals(icebergTable.schema().columns().size(), rows.size());
    for (int i = 0; i < icebergTable.schema().columns().size(); i++) {
        Types.NestedField field = icebergTable.schema().columns().get(i);
        Assert.assertNull(field.doc());
        Assert.assertArrayEquals(new Object[] { field.name(), HiveSchemaUtil.convert(field.type()).getTypeName(), "from deserializer" }, rows.get(i));
    }
}

16 Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache

@Test
public void testDropTableWithAppendedData() throws IOException {
    TableIdentifier identifier = TableIdentifier.of("default", "customers");
    testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, SPEC, FileFormat.PARQUET, ImmutableList.of());
    org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
    testTables.appendIcebergTable(shell.getHiveConf(), icebergTable, FileFormat.PARQUET, null, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
    shell.executeStatement("DROP TABLE customers");
}

16 Source : TestHiveIcebergStorageHandlerNoScan.java
with Apache License 2.0
from apache

@Test
public void testCreateTableWithColumnComments() {
    TableIdentifier identifier = TableIdentifier.of("default", "comment_table");
    shell.executeStatement("CREATE EXTERNAL TABLE comment_table (" + "t_int INT COMMENT 'int column',  " + "t_string STRING COMMENT 'string column') " + "STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' " + testTables.locationForCreateTableSQL(identifier));
    org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
    List<Object[]> rows = shell.executeStatement("DESCRIBE default.comment_table");
    Assert.assertEquals(icebergTable.schema().columns().size(), rows.size());
    for (int i = 0; i < icebergTable.schema().columns().size(); i++) {
        Types.NestedField field = icebergTable.schema().columns().get(i);
        Assert.assertArrayEquals(new Object[] { field.name(), HiveSchemaUtil.convert(field.type()).getTypeName(), field.doc() }, rows.get(i));
    }
}

16 Source : TestFlinkTableSink.java
with Apache License 2.0
from apache

@Test
public void testReplacePartitions() throws Exception {
    Assume.assumeFalse("Flink unbounded streaming does not support overwrite operation", isStreamingJob);
    String tableName = "test_partition";
    sql("CREATE TABLE %s(id INT, data VARCHAR) PARTITIONED BY (data) WITH ('write.format.default'='%s')", tableName, format.name());
    Table partitionedTable = validationCatalog.loadTable(TableIdentifier.of(icebergNamespace, tableName));
    sql("INSERT INTO %s SELECT 1, 'a'", tableName);
    sql("INSERT INTO %s SELECT 2, 'b'", tableName);
    sql("INSERT INTO %s SELECT 3, 'c'", tableName);
    SimpleDataUtil.assertTableRecords(partitionedTable, Lists.newArrayList(SimpleDataUtil.createRecord(1, "a"), SimpleDataUtil.createRecord(2, "b"), SimpleDataUtil.createRecord(3, "c")));
    sql("INSERT OVERWRITE %s SELECT 4, 'b'", tableName);
    sql("INSERT OVERWRITE %s SELECT 5, 'a'", tableName);
    SimpleDataUtil.assertTableRecords(partitionedTable, Lists.newArrayList(SimpleDataUtil.createRecord(5, "a"), SimpleDataUtil.createRecord(4, "b"), SimpleDataUtil.createRecord(3, "c")));
    sql("INSERT OVERWRITE %s PARTITION (data='a') SELECT 6", tableName);
    SimpleDataUtil.assertTableRecords(partitionedTable, Lists.newArrayList(SimpleDataUtil.createRecord(6, "a"), SimpleDataUtil.createRecord(4, "b"), SimpleDataUtil.createRecord(3, "c")));
    sql("DROP TABLE IF EXISTS %s.%s", flinkDatabase, tableName);
}

16 Source : TestFlinkCatalogDatabase.java
with Apache License 2.0
from apache

@Test
public void testListTables() {
    replacedert.replacedertFalse("Namespace should not already exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
    sql("CREATE DATABASE %s", flinkDatabase);
    sql("USE CATALOG %s", catalogName);
    sql("USE %s", DATABASE);
    replacedert.replacedertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
    replacedert.replacedertEquals("Should not list any tables", 0, sql("SHOW TABLES").size());
    validationCatalog.createTable(TableIdentifier.of(icebergNamespace, "tl"), new Schema(Types.NestedField.optional(0, "id", Types.LongType.get())));
    List<Object[]> tables = sql("SHOW TABLES");
    replacedert.replacedertEquals("Only 1 table", 1, tables.size());
    replacedert.replacedertEquals("Table name should match", "tl", tables.get(0)[0]);
}

16 Source : TestFlinkSource.java
with Apache License 2.0
from apache

@Override
protected List<Row> runWithProjection(String... projected) throws Exception {
    TableSchema.Builder builder = TableSchema.builder();
    TableSchema schema = FlinkSchemaUtil.toSchema(FlinkSchemaUtil.convert(catalog.loadTable(TableIdentifier.of("default", "t")).schema()));
    for (String field : projected) {
        TableColumn column = schema.getTableColumn(field).get();
        builder.field(column.getName(), column.getType());
    }
    return run(FlinkSource.forRowData().project(builder.build()), Maps.newHashMap(), "", projected);
}

16 Source : TestHadoopCatalog.java
with Apache License 2.0
from apache

@Test
public void testLoadNamespaceMeta() throws IOException {
    Configuration conf = new Configuration();
    String warehousePath = temp.newFolder().getAbsolutePath();
    HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);
    TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "metadata");
    TableIdentifier tbl2 = TableIdentifier.of("db", "ns2", "ns3", "tbl2");
    TableIdentifier tbl3 = TableIdentifier.of("db", "ns3", "tbl4");
    TableIdentifier tbl4 = TableIdentifier.of("db", "metadata");
    Lists.newArrayList(tbl1, tbl2, tbl3, tbl4).forEach(t -> catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned()));
    catalog.loadNamespaceMetadata(Namespace.of("db"));
    AssertHelpers.assertThrows("Should fail to load namespace doesn't exist", NoSuchNamespaceException.class, "Namespace does not exist: ", () -> {
        catalog.loadNamespaceMetadata(Namespace.of("db", "db2", "ns2"));
    });
}

16 Source : TestHadoopCatalog.java
with Apache License 2.0
from apache

@Test
public void testTableName() throws Exception {
    Configuration conf = new Configuration();
    String warehousePath = temp.newFolder().getAbsolutePath();
    HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);
    TableIdentifier tableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl");
    catalog.buildTable(tableIdent, SCHEMA).withPartitionSpec(SPEC).create();
    Table table = catalog.loadTable(tableIdent);
    Assert.assertEquals("Name must match", "hadoop.db.ns1.ns2.tbl", table.name());
    TableIdentifier snapshotsTableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl", "snapshots");
    Table snapshotsTable = catalog.loadTable(snapshotsTableIdent);
    Assert.assertEquals("Name must match", "hadoop.db.ns1.ns2.tbl.snapshots", snapshotsTable.name());
}

16 Source : BaseMetastoreCatalog.java
with Apache License 2.0
from apache

private Table loadMetadataTable(TableIdentifier identifier) {
    String tableName = identifier.name();
    MetadataTableType type = MetadataTableType.from(tableName);
    if (type != null) {
        TableIdentifier baseTableIdentifier = TableIdentifier.of(identifier.namespace().levels());
        TableOperations ops = newTableOps(baseTableIdentifier);
        if (ops.current() == null) {
            throw new NoSuchTableException("Table does not exist: %s", baseTableIdentifier);
        }
        return MetadataTableUtils.createMetadataTableInstance(ops, name(), baseTableIdentifier, identifier, type);
    } else {
        throw new NoSuchTableException("Table does not exist: %s", identifier);
    }
}
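
This resolution logic is what makes metadata table identifiers like the one in the TestHadoopCatalog testTableName example work: the final identifier part names a metadata table such as snapshots, and the leading parts identify the base table. A hypothetical illustration, assuming a HadoopCatalog warehouse at /tmp/warehouse that already contains the table db.tbl:

import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.hadoop.HadoopCatalog;

public class MetadataTableDemo {

    public static void main(String[] args) {
        // Assumed warehouse path; the table db.tbl must already exist under it.
        HadoopCatalog catalog = new HadoopCatalog(new Configuration(), "/tmp/warehouse");
        // The trailing "snapshots" part is resolved to a metadata table by loadMetadataTable.
        Table snapshots = catalog.loadTable(TableIdentifier.of("db", "tbl", "snapshots"));
        System.out.println(snapshots.name()); // e.g. hadoop.db.tbl.snapshots
    }
}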

16 Source : GlueToIcebergConverter.java
with Apache License 2.0
from apache

static TableIdentifier toTableId(Table table) {
    return TableIdentifier.of(table.databaseName(), table.name());
}

16 Source : GlueCatalogTableTest.java
with Apache License 2.0
from apache

@Test
public void testRenameTable() {
    String namespace = createNamespace();
    String tableName = createTable(namespace);
    Table table = glueCatalog.loadTable(TableIdentifier.of(namespace, tableName));
    // rename table
    String newTableName = tableName + "_2";
    glueCatalog.renameTable(TableIdentifier.of(namespace, tableName), TableIdentifier.of(namespace, newTableName));
    Table renamedTable = glueCatalog.loadTable(TableIdentifier.of(namespace, newTableName));
    Assert.assertEquals(table.location(), renamedTable.location());
    Assert.assertEquals(table.schema().toString(), renamedTable.schema().toString());
    Assert.assertEquals(table.spec(), renamedTable.spec());
    Assert.assertEquals(table.currentSnapshot(), renamedTable.currentSnapshot());
}

16 Source : GlueCatalogTableTest.java
with Apache License 2.0
from apache

@Test
public void testListTables() {
    String namespace = createNamespace();
    replacedert.replacedertTrue("list namespace should have nothing before table creation", glueCatalog.listTables(Namespace.of(namespace)).isEmpty());
    String tableName = createTable(namespace);
    List<TableIdentifier> tables = glueCatalog.listTables(Namespace.of(namespace));
    replacedert.replacedertEquals(1, tables.size());
    replacedert.replacedertEquals(TableIdentifier.of(namespace, tableName), tables.get(0));
}

16 Source : GlueCatalogTableTest.java
with Apache License 2.0
from apache

@Test
public void testRenameTable_failToDeleteOldTable() {
    String namespace = createNamespace();
    String tableName = createTable(namespace);
    TableIdentifier id = TableIdentifier.of(namespace, tableName);
    Table table = glueCatalog.loadTable(id);
    // delete the old table metadata, so that drop old table will fail
    String newTableName = tableName + "_2";
    glue.updateTable(UpdateTableRequest.builder().databaseName(namespace).tableInput(TableInput.builder().name(tableName).parameters(Maps.newHashMap()).build()).build());
    replacedertHelpers.replacedertThrows("should fail to rename", ValidationException.clreplaced, "Input Glue table is not an iceberg table", () -> glueCatalog.renameTable(TableIdentifier.of(namespace, tableName), TableIdentifier.of(namespace, newTableName)));
    replacedertHelpers.replacedertThrows("renamed table should be deleted", EnreplacedyNotFoundException.clreplaced, "not found", () -> glue.getTable(GetTableRequest.builder().databaseName(namespace).name(newTableName).build()));
}

15 Source : CustomCatalogs.java
with Apache License 2.0
from apache

private static Pair<Catalog, TableIdentifier> catalogAndIdentifier(SparkSession spark, String path) {
    String[] currentNamespace = new String[] { spark.catalog().currentDatabase() };
    List<String> nameParts = Splitter.on(".").splitToList(path);
    return SparkUtil.catalogAndIdentifier(nameParts, s -> loadCatalog(spark, s), (n, t) -> TableIdentifier.of(Namespace.of(n), t), loadCatalog(spark, ICEBERG_DEFAULT_CATALOG), currentNamespace);
}

15 Source : TestSparkReaderDeletes.java
with Apache License 2.0
from apache

@Override
protected Table createTable(String name, Schema schema, PartitionSpec spec) {
    Table table = catalog.createTable(TableIdentifier.of("default", name), schema);
    TableOperations ops = ((BaseTable) table).operations();
    TableMetadata meta = ops.current();
    ops.commit(meta, meta.upgradeToFormatVersion(2));
    return table;
}

15 Source : TestIcebergSourceTablesBase.java
with Apache License 2.0
from apache

@Test
public void testAllMetadataTablesWithStagedCommits() throws Exception {
    TableIdentifier tableIdentifier = TableIdentifier.of("db", "stage_aggregate_table_test");
    Table table = createTable(tableIdentifier, SCHEMA, PartitionSpec.builderFor(SCHEMA).identity("id").build());
    table.updateProperties().set(TableProperties.WRITE_AUDIT_PUBLISH_ENABLED, "true").commit();
    spark.conf().set("spark.wap.id", "1234567");
    Dataset<Row> df1 = spark.createDataFrame(Lists.newArrayList(new SimpleRecord(1, "a")), SimpleRecord.class);
    Dataset<Row> df2 = spark.createDataFrame(Lists.newArrayList(new SimpleRecord(2, "b")), SimpleRecord.class);
    df1.select("id", "data").write().format("iceberg").mode("append").save(loadLocation(tableIdentifier));
    // add a second file
    df2.select("id", "data").write().format("iceberg").mode("append").save(loadLocation(tableIdentifier));
    List<Row> actualAllData = spark.read().format("iceberg").load(loadLocation(tableIdentifier, "all_data_files")).collectAsList();
    List<Row> actualAllManifests = spark.read().format("iceberg").load(loadLocation(tableIdentifier, "all_manifests")).collectAsList();
    List<Row> actualAllEntries = spark.read().format("iceberg").load(loadLocation(tableIdentifier, "all_entries")).collectAsList();
    replacedert.replacedertTrue("Stage table should have some snapshots", table.snapshots().iterator().hasNext());
    replacedert.replacedertEquals("Stage table should have null currentSnapshot", null, table.currentSnapshot());
    replacedert.replacedertEquals("Actual results should have two rows", 2, actualAllData.size());
    replacedert.replacedertEquals("Actual results should have two rows", 2, actualAllManifests.size());
    replacedert.replacedertEquals("Actual results should have two rows", 2, actualAllEntries.size());
}
