org.apache.iceberg.StructLike

Here are examples of the Java API org.apache.iceberg.StructLike, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

65 Examples
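
For reference, StructLike is Iceberg's minimal positional-access interface: int size(), <T> T get(int pos, Class<T> javaClass), and <T> void set(int pos, T value). Before the examples, here is a minimal array-backed sketch (the class name ArrayStructLike is hypothetical, not part of Iceberg):

import org.apache.iceberg.StructLike;

// Minimal array-backed StructLike, e.g. for an ad-hoc partition key in tests.
public class ArrayStructLike implements StructLike {

    private final Object[] values;

    public ArrayStructLike(Object... values) {
        this.values = values;
    }

    @Override
    public int size() {
        return values.length;
    }

    @Override
    public <T> T get(int pos, Class<T> javaClass) {
        // positional, type-checked access; throws ClassCastException on a type mismatch
        return javaClass.cast(values[pos]);
    }

    @Override
    public <T> void set(int pos, T value) {
        values[pos] = value;
    }
}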

19 Source : FixedIcebergWriterPool.java
with Apache License 2.0
from Netflix

@Override
public boolean isClosed(StructLike partition) {
    return !pool.containsKey(partition);
}

19 Source : FixedIcebergWriterPool.java
with Apache License 2.0
from Netflix

/**
 * Attempts to close all writers and produce {@link DataFile}s. If a writer is already closed, then it will
 * produce a {@code null} which will be excluded from the resulting list.
 */
@Override
public List<DataFile> closeAll() throws IOException, UncheckedIOException {
    List<DataFile> dataFiles = new ArrayList<>();
    for (StructLike partition : pool.keySet()) {
        DataFile dataFile = close(partition);
        if (dataFile != null) {
            dataFiles.add(dataFile);
        }
    }
    return dataFiles;
}

19 Source : FixedIcebergWriterPool.java
with Apache License 2.0
from Netflix

@Override
public DataFile close(StructLike partition) throws IOException, UncheckedIOException {
    IcebergWriter writer = pool.get(partition);
    if (writer == null) {
        throw new RuntimeException("writer does not exist in writer pool");
    }
    try {
        return writer.close();
    } finally {
        pool.remove(partition);
    }
}

19 Source : TestSparkDataFile.java
with Apache License 2.0
from apache

private void checkStructLike(StructLike expected, StructLike actual) {
    replacedert.replacedertEquals("Struct size should match", expected.size(), actual.size());
    for (int i = 0; i < expected.size(); i++) {
        replacedert.replacedertEquals("Struct values must match", expected.get(i, Object.clreplaced), actual.get(i, Object.clreplaced));
    }
}

19 Source : StructInternalRow.java
with Apache License 2.0
from apache

class StructInternalRow extends InternalRow {

    private final Types.StructType type;

    private StructLike struct;

    StructInternalRow(Types.StructType type) {
        this.type = type;
    }

    private StructInternalRow(Types.StructType type, StructLike struct) {
        this.type = type;
        this.struct = struct;
    }

    public StructInternalRow setStruct(StructLike newStruct) {
        this.struct = newStruct;
        return this;
    }

    @Override
    public int numFields() {
        return struct.size();
    }

    @Override
    public void setNullAt(int i) {
        throw new UnsupportedOperationException("StructInternalRow is read-only");
    }

    @Override
    public void update(int i, Object value) {
        throw new UnsupportedOperationException("StructInternalRow is read-only");
    }

    @Override
    public InternalRow copy() {
        return this;
    }

    @Override
    public boolean isNullAt(int ordinal) {
        return struct.get(ordinal, Object.class) == null;
    }

    @Override
    public boolean getBoolean(int ordinal) {
        return struct.get(ordinal, Boolean.class);
    }

    @Override
    public byte getByte(int ordinal) {
        return (byte) (int) struct.get(ordinal, Integer.class);
    }

    @Override
    public short getShort(int ordinal) {
        return (short) (int) struct.get(ordinal, Integer.class);
    }

    @Override
    public int getInt(int ordinal) {
        Object integer = struct.get(ordinal, Object.class);
        if (integer instanceof Integer) {
            return (int) integer;
        } else if (integer instanceof LocalDate) {
            return (int) ((LocalDate) integer).toEpochDay();
        } else {
            throw new IllegalStateException("Unknown type for int field. Type name: " + integer.getClass().getName());
        }
    }

    @Override
    public long getLong(int ordinal) {
        Object longVal = struct.get(ordinal, Object.class);
        if (longVal instanceof Long) {
            return (long) longVal;
        } else if (longVal instanceof OffsetDateTime) {
            return Duration.between(Instant.EPOCH, (OffsetDateTime) longVal).toNanos() / 1000;
        } else if (longVal instanceof LocalDate) {
            return ((LocalDate) longVal).toEpochDay();
        } else {
            throw new IllegalStateException("Unknown type for long field. Type name: " + longVal.getClass().getName());
        }
    }

    @Override
    public float getFloat(int ordinal) {
        return struct.get(ordinal, Float.class);
    }

    @Override
    public double getDouble(int ordinal) {
        return struct.get(ordinal, Double.class);
    }

    @Override
    public Decimal getDecimal(int ordinal, int precision, int scale) {
        return isNullAt(ordinal) ? null : getDecimalInternal(ordinal, precision, scale);
    }

    private Decimal getDecimalInternal(int ordinal, int precision, int scale) {
        return Decimal.apply(struct.get(ordinal, BigDecimal.class));
    }

    @Override
    public UTF8String getUTF8String(int ordinal) {
        return isNullAt(ordinal) ? null : getUTF8StringInternal(ordinal);
    }

    private UTF8String getUTF8StringInternal(int ordinal) {
        CharSequence seq = struct.get(ordinal, CharSequence.class);
        return UTF8String.fromString(seq.toString());
    }

    @Override
    public byte[] getBinary(int ordinal) {
        return isNullAt(ordinal) ? null : getBinaryInternal(ordinal);
    }

    private byte[] getBinaryInternal(int ordinal) {
        Object bytes = struct.get(ordinal, Object.class);
        // should only be either ByteBuffer or byte[]
        if (bytes instanceof ByteBuffer) {
            return ByteBuffers.toByteArray((ByteBuffer) bytes);
        } else if (bytes instanceof byte[]) {
            return (byte[]) bytes;
        } else {
            throw new IllegalStateException("Unknown type for binary field. Type name: " + bytes.getClass().getName());
        }
    }

    @Override
    public CalendarInterval getInterval(int ordinal) {
        throw new UnsupportedOperationException("Unsupported type: interval");
    }

    @Override
    public InternalRow getStruct(int ordinal, int numFields) {
        return isNullAt(ordinal) ? null : getStructInternal(ordinal, numFields);
    }

    private InternalRow getStructInternal(int ordinal, int numFields) {
        return new StructInternalRow(type.fields().get(ordinal).type().asStructType(), struct.get(ordinal, StructLike.class));
    }

    @Override
    public ArrayData getArray(int ordinal) {
        return isNullAt(ordinal) ? null : getArrayInternal(ordinal);
    }

    private ArrayData getArrayInternal(int ordinal) {
        return collectionToArrayData(type.fields().get(ordinal).type().asListType().elementType(), struct.get(ordinal, Collection.class));
    }

    @Override
    public MapData getMap(int ordinal) {
        return isNullAt(ordinal) ? null : getMapInternal(ordinal);
    }

    private MapData getMapInternal(int ordinal) {
        return mapToMapData(type.fields().get(ordinal).type().asMapType(), struct.get(ordinal, Map.class));
    }

    @Override
    @SuppressWarnings("checkstyle:CyclomaticComplexity")
    public Object get(int ordinal, DataType dataType) {
        if (isNullAt(ordinal)) {
            return null;
        }
        if (dataType instanceof IntegerType) {
            return getInt(ordinal);
        } else if (dataType instanceof LongType) {
            return getLong(ordinal);
        } else if (dataType instanceof StringType) {
            return getUTF8StringInternal(ordinal);
        } else if (dataType instanceof FloatType) {
            return getFloat(ordinal);
        } else if (dataType instanceof DoubleType) {
            return getDouble(ordinal);
        } else if (dataType instanceof DecimalType) {
            DecimalType decimalType = (DecimalType) dataType;
            return getDecimalInternal(ordinal, decimalType.precision(), decimalType.scale());
        } else if (dataType instanceof BinaryType) {
            return getBinaryInternal(ordinal);
        } else if (dataType instanceof StructType) {
            return getStructInternal(ordinal, ((StructType) dataType).size());
        } else if (dataType instanceof ArrayType) {
            return getArrayInternal(ordinal);
        } else if (dataType instanceof MapType) {
            return getMapInternal(ordinal);
        } else if (dataType instanceof BooleanType) {
            return getBoolean(ordinal);
        } else if (dataType instanceof ByteType) {
            return getByte(ordinal);
        } else if (dataType instanceof ShortType) {
            return getShort(ordinal);
        } else if (dataType instanceof DateType) {
            return getInt(ordinal);
        } else if (dataType instanceof TimestampType) {
            return getLong(ordinal);
        }
        return null;
    }

    private MapData mapToMapData(Types.MapType mapType, Map<?, ?> map) {
        // make a defensive copy to ensure entries do not change
        List<Map.Entry<?, ?>> entries = ImmutableList.copyOf(map.entrySet());
        return new ArrayBasedMapData(collectionToArrayData(mapType.keyType(), Lists.transform(entries, Map.Entry::getKey)), collectionToArrayData(mapType.valueType(), Lists.transform(entries, Map.Entry::getValue)));
    }

    private ArrayData collectionToArrayData(Type elementType, Collection<?> values) {
        switch(elementType.typeId()) {
            case BOOLEAN:
            case INTEGER:
            case DATE:
            case TIME:
            case LONG:
            case TIMESTAMP:
            case FLOAT:
            case DOUBLE:
                return fillArray(values, array -> (pos, value) -> array[pos] = value);
            case STRING:
                return fillArray(values, array -> (BiConsumer<Integer, CharSequence>) (pos, seq) -> array[pos] = UTF8String.fromString(seq.toString()));
            case FIXED:
            case BINARY:
                return fillArray(values, array -> (BiConsumer<Integer, ByteBuffer>) (pos, buf) -> array[pos] = ByteBuffers.toByteArray(buf));
            case DECIMAL:
                return fillArray(values, array -> (BiConsumer<Integer, BigDecimal>) (pos, dec) -> array[pos] = Decimal.apply(dec));
            case STRUCT:
                return fillArray(values, array -> (BiConsumer<Integer, StructLike>) (pos, tuple) -> array[pos] = new StructInternalRow(elementType.asStructType(), tuple));
            case LIST:
                return fillArray(values, array -> (BiConsumer<Integer, Collection<?>>) (pos, list) -> array[pos] = collectionToArrayData(elementType.asListType().elementType(), list));
            case MAP:
                return fillArray(values, array -> (BiConsumer<Integer, Map<?, ?>>) (pos, map) -> array[pos] = mapToMapData(elementType.asMapType(), map));
            default:
                throw new UnsupportedOperationException("Unsupported array element type: " + elementType);
        }
    }

    @SuppressWarnings("unchecked")
    private <T> GenericArrayData fillArray(Collection<?> values, Function<Object[], BiConsumer<Integer, T>> makeSetter) {
        Object[] array = new Object[values.size()];
        BiConsumer<Integer, T> setter = makeSetter.apply(array);
        int index = 0;
        for (Object value : values) {
            if (value == null) {
                array[index] = null;
            } else {
                setter.accept(index, (T) value);
            }
            index += 1;
        }
        return new GenericArrayData(array);
    }
}

19 Source : SparkAppenderFactory.java
with Apache License 2.0
from apache

@Override
public DataWriter<InternalRow> newDataWriter(EncryptedOutputFile file, FileFormat format, StructLike partition) {
    return new DataWriter<>(newAppender(file.encryptingOutputFile(), format), format, file.encryptingOutputFile().location(), spec, partition, file.keyMetadata());
}

19 Source : TestHelper.java
with Apache License 2.0
from apache

public void appendToTable(StructLike partition, List<Record> records) throws IOException {
    appender().appendToTable(partition, records);
}

19 Source : TestHelper.java
with Apache License 2.0
from apache

public DataFile writeFile(StructLike partition, List<Record> records) throws IOException {
    return appender().writeFile(partition, records);
}

19 Source : TestHiveIcebergStorageHandlerLocalScan.java
with Apache License 2.0
from apache

private void runCreateAndReadTest(TableIdentifier identifier, String createSQL, Schema expectedSchema, PartitionSpec expectedSpec, Map<StructLike, List<Record>> data) throws IOException {
    shell.executeStatement(createSQL);
    org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
    Assert.assertEquals(expectedSchema.asStruct(), icebergTable.schema().asStruct());
    Assert.assertEquals(expectedSpec, icebergTable.spec());
    List<Record> expected = Lists.newArrayList();
    for (StructLike partition : data.keySet()) {
        testTables.appendIcebergTable(shell.getHiveConf(), icebergTable, fileFormat, partition, data.get(partition));
        expected.addAll(data.get(partition));
    }
    List<Object[]> descRows = shell.executeStatement("SELECT * FROM " + identifier.toString());
    List<Record> records = HiveIcebergTestUtils.valueForRow(icebergTable.schema(), descRows);
    HiveIcebergTestUtils.validateData(expected, records, 0);
}

19 Source : FlinkAppenderFactory.java
with Apache License 2.0
from apache

@Override
public DataWriter<RowData> newDataWriter(EncryptedOutputFile file, FileFormat format, StructLike partition) {
    return new DataWriter<>(newAppender(file.encryptingOutputFile(), format), format, file.encryptingOutputFile().location(), spec, partition, file.keyMetadata());
}

19 Source : FlinkCatalog.java
with Apache License 2.0
from apache

@Override
public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath) throws TableNotExistException, TableNotPartitionedException, CatalogException {
    Table table = loadIcebergTable(tablePath);
    if (table.spec().isUnpartitioned()) {
        throw new TableNotPartitionedException(icebergCatalog.name(), tablePath);
    }
    Set<CatalogPartitionSpec> set = Sets.newHashSet();
    try (CloseableIterable<FileScanTask> tasks = table.newScan().planFiles()) {
        for (DataFile dataFile : CloseableIterable.transform(tasks, FileScanTask::file)) {
            Map<String, String> map = Maps.newHashMap();
            StructLike structLike = dataFile.partition();
            PartitionSpec spec = table.specs().get(dataFile.specId());
            for (int i = 0; i < structLike.size(); i++) {
                map.put(spec.fields().get(i).name(), String.valueOf(structLike.get(i, Object.class)));
            }
            set.add(new CatalogPartitionSpec(map));
        }
    } catch (IOException e) {
        throw new CatalogException(String.format("Failed to list partitions of table %s", tablePath), e);
    }
    return Lists.newArrayList(set);
}

19 Source : GenericAppenderHelper.java
with Apache License 2.0
from apache

public DataFile writeFile(StructLike partition, List<Record> records) throws IOException {
    Preconditions.checkNotNull(table, "table not set");
    File file = tmp.newFile();
    Assert.assertTrue(file.delete());
    return appendToLocalFile(table, file, fileFormat, partition, records);
}

19 Source : GenericAppenderHelper.java
with Apache License 2.0
from apache

public void appendToTable(StructLike partition, List<Record> records) throws IOException {
    appendToTable(writeFile(partition, records));
}

19 Source : InternalRecordWrapper.java
with Apache License 2.0
from apache

public class InternalRecordWrapper implements StructLike {

    private final Function<Object, Object>[] transforms;

    private StructLike wrapped = null;

    @SuppressWarnings("unchecked")
    public InternalRecordWrapper(Types.StructType struct) {
        this.transforms = struct.fields().stream().map(field -> converter(field.type())).toArray(length -> (Function<Object, Object>[]) Array.newInstance(Function.class, length));
    }

    private static Function<Object, Object> converter(Type type) {
        switch(type.typeId()) {
            case DATE:
                return date -> DateTimeUtil.daysFromDate((LocalDate) date);
            case TIME:
                return time -> DateTimeUtil.microsFromTime((LocalTime) time);
            case TIMESTAMP:
                if (((Types.TimestampType) type).shouldAdjustToUTC()) {
                    return timestamp -> DateTimeUtil.microsFromTimestamptz((OffsetDateTime) timestamp);
                } else {
                    return timestamp -> DateTimeUtil.microsFromTimestamp((LocalDateTime) timestamp);
                }
            case FIXED:
                return bytes -> ByteBuffer.wrap((byte[]) bytes);
            case STRUCT:
                InternalRecordWrapper wrapper = new InternalRecordWrapper(type.asStructType());
                return struct -> wrapper.wrap((StructLike) struct);
            default:
        }
        return null;
    }

    public StructLike get() {
        return wrapped;
    }

    public InternalRecordWrapper wrap(StructLike record) {
        this.wrapped = record;
        return this;
    }

    @Override
    public int size() {
        return wrapped.size();
    }

    @Override
    public <T> T get(int pos, Class<T> javaClass) {
        if (transforms[pos] != null) {
            return javaClass.cast(transforms[pos].apply(wrapped.get(pos, Object.class)));
        }
        return wrapped.get(pos, javaClass);
    }

    @Override
    public <T> void set(int pos, T value) {
        throw new UnsupportedOperationException("Cannot update InternalRecordWrapper");
    }
}
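
A brief usage sketch for the wrapper above (the table and records variables are hypothetical): wrapping a record converts date/time values to Iceberg's internal encodings on read:

// Hypothetical usage; assumes field 0 of the schema is a timestamptz column.
InternalRecordWrapper wrapper = new InternalRecordWrapper(table.schema().asStruct());
for (Record record : records) {
    StructLike internal = wrapper.wrap(record); // the wrapper is reused, not copied
    Long micros = internal.get(0, Long.class);  // OffsetDateTime mapped to epoch micros
}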

19 Source : GenericAppenderFactory.java
with Apache License 2.0
from apache

@Override
public org.apache.iceberg.io.DataWriter<Record> newDataWriter(EncryptedOutputFile file, FileFormat format, StructLike partition) {
    return new org.apache.iceberg.io.DataWriter<>(newAppender(file.encryptingOutputFile(), format), format, file.encryptingOutputFile().location(), spec, partition, file.keyMetadata());
}

19 Source : StructLikeWrapper.java
with Apache License 2.0
from apache

public StructLikeWrapper set(StructLike newStruct) {
    this.struct = newStruct;
    this.hashCode = null;
    return this;
}

19 Source : StructCopy.java
with Apache License 2.0
from apache

static StructLike copy(StructLike struct) {
    return new StructCopy(struct);
}

19 Source : StructProjection.java
with Apache License 2.0
from apache

public StructProjection wrap(StructLike newStruct) {
    this.struct = newStruct;
    return this;
}

19 Source : ResidualEvaluator.java
with Apache License 2.0
from apache

/**
 * Returns a residual expression for the given partition values.
 *
 * @param partitionData partition data values
 * @return the residual of this evaluator's expression from the partition values
 */
public Expression residualFor(StructLike partitionData) {
    return new ResidualVisitor().eval(partitionData);
}

19 Source : Evaluator.java
with Apache License 2.0
from apache

public boolean eval(StructLike data) {
    return new EvalVisitor().eval(data);
}

19 Source : BoundPredicate.java
with Apache License 2.0
from apache

public boolean test(StructLike struct) {
    return test(term().eval(struct));
}

18 Source : FixedIcebergWriterPool.java
with Apache License 2.0
from Netflix

@Override
public void write(StructLike partition, Record record) {
    IcebergWriter writer = pool.get(partition);
    if (writer == null) {
        throw new RuntimeException("writer does not exist in writer pool");
    }
    writer.write(record);
}

18 Source : NoOpPartitioner.java
with Apache License 2.0
from Netflix

@Override
public StructLike partition(StructLike record) {
    return null;
}

18 Source : DefaultIcebergWriter.java
with Apache License 2.0
from Netflix

/**
 * Opens a {@link FileAppender} using a {@link StructLike} partition key
 * for a specific {@link FileFormat}.
 * <p>
 * A filename is automatically generated for this appender.
 * <p>
 * Supports Parquet; Avro, ORC, and other formats are unsupported.
 */
@Override
public void open(StructLike newPartitionKey) throws IOException {
    partitionKey = newPartitionKey;
    Path path = new Path(table.location(), generateFilename());
    String location = locationProvider.newDataLocation(path.toString());
    logger.info("opening new {} file appender {}", format, location);
    file = HadoopOutputFile.fromLocation(location, config.getHadoopConfig());
    switch(format) {
        case PARQUET:
            appender = Parquet.write(file).schema(table.schema()).createWriterFunc(GenericParquetWriter::buildWriter).setAll(table.properties()).overwrite().build();
            break;
        case AVRO:
        default:
            throw new UnsupportedOperationException("Cannot write using an unsupported file format " + format);
    }
}

18 Source : TestIcebergCTASWithPartition.java
with Apache License 2.0
from dremio

private void verifyPartitionValue(String tableFolder, Class expectedClass, Object expectedValue) {
    Table table = new HadoopTables(new Configuration()).load(tableFolder);
    for (FileScanTask fileScanTask : table.newScan().planFiles()) {
        StructLike structLike = fileScanTask.file().partition();
        Assert.assertEquals(structLike.get(0, expectedClass), expectedValue);
    }
}

18 Source : SparkAppenderFactory.java
with Apache License 2.0
from apache

@Override
public EqualityDeleteWriter<InternalRow> newEqDeleteWriter(EncryptedOutputFile file, FileFormat format, StructLike partition) {
    Preconditions.checkState(equalityFieldIds != null && equalityFieldIds.length > 0, "Equality field ids shouldn't be null or empty when creating equality-delete writer");
    Preconditions.checkNotNull(eqDeleteRowSchema, "Equality delete row schema shouldn't be null when creating equality-delete writer");
    try {
        switch(format) {
            case PARQUET:
                return Parquet.writeDeletes(file.encryptingOutputFile()).createWriterFunc(msgType -> SparkParquetWriters.buildWriter(lazyEqDeleteSparkType(), msgType)).overwrite().rowSchema(eqDeleteRowSchema).withSpec(spec).withPartition(partition).equalityFieldIds(equalityFieldIds).withKeyMetadata(file.keyMetadata()).buildEqualityWriter();
            case AVRO:
                return Avro.writeDeletes(file.encryptingOutputFile()).createWriterFunc(ignored -> new SparkAvroWriter(lazyEqDeleteSparkType())).overwrite().rowSchema(eqDeleteRowSchema).withSpec(spec).withPartition(partition).equalityFieldIds(equalityFieldIds).withKeyMetadata(file.keyMetadata()).buildEqualityWriter();
            default:
                throw new UnsupportedOperationException("Cannot write equality-deletes for unsupported file format: " + format);
        }
    } catch (IOException e) {
        throw new UncheckedIOException("Failed to create new equality delete writer", e);
    }
}

18 Source : TestTables.java
with Apache License 2.0
from apache

/**
 * Append more data to the table.
 * @param configuration The configuration used during the table creation
 * @param table The table to append
 * @param format The file format used for writing the data
 * @param partition The partition to write to
 * @param records The records which should be added to the table
 * @throws IOException If there is an error writing data
 */
public void appendIcebergTable(Configuration configuration, Table table, FileFormat format, StructLike partition, List<Record> records) throws IOException {
    TestHelper helper = new TestHelper(configuration, null, null, null, null, format, temp);
    helper.setTable(table);
    if (!records.isEmpty()) {
        helper.appendToTable(helper.writeFile(partition, records));
    }
}

18 Source : StructLikeSet.java
with Apache License 2.0
from apache

@Override
public boolean add(StructLike struct) {
    return wrapperSet.add(StructLikeWrapper.forType(type).set(struct));
}

18 Source : StructLikeMap.java
with Apache License 2.0
from apache

@Override
public T put(StructLike key, T value) {
    return wrapperMap.put(StructLikeWrapper.forType(type).set(key), value);
}

18 Source : PartitionSet.java
with Apache License 2.0
from apache

public boolean contains(int specId, StructLike struct) {
    Set<StructLike> partitionSet = partitionSetById.get(specId);
    if (partitionSet != null) {
        return partitionSet.contains(struct);
    }
    return false;
}

18 Source : PartitionSet.java
with Apache License 2.0
from apache

public boolean remove(int specId, StructLike struct) {
    Set<StructLike> partitionSet = partitionSetById.get(specId);
    if (partitionSet != null) {
        return partitionSet.remove(struct);
    }
    return false;
}

18 Source : ManifestFileUtil.java
with Apache License 2.0
from apache

private static boolean canContain(List<FieldSummary<?>> summaries, StructLike struct) {
    if (struct.size() != summaries.size()) {
        return false;
    }
    // if any value is not contained, the struct is not contained and this can return early
    for (int pos = 0; pos < summaries.size(); pos += 1) {
        Object value = struct.get(pos, Object.class);
        if (!summaries.get(pos).canContain(value)) {
            return false;
        }
    }
    return true;
}

18 Source : OutputFileFactory.java
with Apache License 2.0
from apache

/**
 * Generates EncryptedOutputFile for PartitionedWriter.
 */
public EncryptedOutputFile newOutputFile(StructLike partition) {
    String newDataLocation = locations.newDataLocation(spec, partition, generateFilename());
    OutputFile rawOutputFile = io.newOutputFile(newDataLocation);
    return encryptionManager.encrypt(rawOutputFile);
}

18 Source : DataWriter.java
with Apache License 2.0
from apache

public class DataWriter<T> implements Closeable {

    private final FileAppender<T> appender;

    private final FileFormat format;

    private final String location;

    private final PartitionSpec spec;

    private final StructLike partition;

    private final ByteBuffer keyMetadata;

    private DataFile dataFile = null;

    public DataWriter(FileAppender<T> appender, FileFormat format, String location, PartitionSpec spec, StructLike partition, EncryptionKeyMetadata keyMetadata) {
        this.appender = appender;
        this.format = format;
        this.location = location;
        this.spec = spec;
        this.partition = partition;
        this.keyMetadata = keyMetadata != null ? keyMetadata.buffer() : null;
    }

    public void add(T row) {
        appender.add(row);
    }

    public long length() {
        return appender.length();
    }

    @Override
    public void close() throws IOException {
        if (dataFile == null) {
            appender.close();
            this.dataFile = DataFiles.builder(spec).withFormat(format).withPath(location).withPartition(partition).withEncryptionKeyMetadata(keyMetadata).withFileSizeInBytes(appender.length()).withMetrics(appender.metrics()).withSplitOffsets(appender.splitOffsets()).build();
        }
    }

    public DataFile toDataFile() {
        Preconditions.checkState(dataFile != null, "Cannot create data file from unclosed writer");
        return dataFile;
    }
}
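
A usage sketch for DataWriter (the appender, location, spec, and partitionKey variables are assumed already built): add rows, close, then collect the resulting DataFile:

// Hypothetical flow; close() builds the DataFile exactly once.
DataWriter<Record> writer = new DataWriter<>(appender, FileFormat.PARQUET, location, spec, partitionKey, null);
try {
    for (Record row : rows) {
        writer.add(row);
    }
} finally {
    writer.close();
}
DataFile dataFile = writer.toDataFile(); // fails if the writer was not closed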

18 Source : StructProjection.java
with Apache License 2.0
from apache

public class StructProjection implements StructLike {

    /**
     * Creates a projecting wrapper for {@link StructLike} rows.
     * <p>
     * This projection does not work with repeated types like lists and maps.
     *
     * @param schema schema of rows wrapped by this projection
     * @param ids field ids from the row schema to project
     * @return a wrapper to project rows
     */
    public static StructProjection create(Schema schema, Set<Integer> ids) {
        StructType structType = schema.asStruct();
        return new StructProjection(structType, TypeUtil.select(structType, ids));
    }

    /**
     * Creates a projecting wrapper for {@link StructLike} rows.
     * <p>
     * This projection does not work with repeated types like lists and maps.
     *
     * @param dataSchema schema of rows wrapped by this projection
     * @param projectedSchema result schema of the projected rows
     * @return a wrapper to project rows
     */
    public static StructProjection create(Schema dataSchema, Schema projectedSchema) {
        return new StructProjection(dataSchema.asStruct(), projectedSchema.asStruct());
    }

    private final StructType type;

    private final int[] positionMap;

    private final StructProjection[] nestedProjections;

    private StructLike struct;

    private StructProjection(StructType structType, StructType projection) {
        this.type = projection;
        this.positionMap = new int[projection.fields().size()];
        this.nestedProjections = new StructProjection[projection.fields().size()];
        // set up the projection positions and any nested projections that are needed
        List<Types.NestedField> dataFields = structType.fields();
        for (int pos = 0; pos < positionMap.length; pos += 1) {
            Types.NestedField projectedField = projection.fields().get(pos);
            boolean found = false;
            for (int i = 0; !found && i < dataFields.size(); i += 1) {
                Types.NestedField dataField = dataFields.get(i);
                if (projectedField.fieldId() == dataField.fieldId()) {
                    found = true;
                    positionMap[pos] = i;
                    switch(projectedField.type().typeId()) {
                        case STRUCT:
                            nestedProjections[pos] = new StructProjection(dataField.type().asStructType(), projectedField.type().asStructType());
                            break;
                        case MAP:
                        case LIST:
                            throw new IllegalArgumentException(String.format("Cannot project list or map field: %s", projectedField));
                        default:
                            nestedProjections[pos] = null;
                    }
                }
            }
            if (!found) {
                throw new IllegalArgumentException(String.format("Cannot find field %s in %s", projectedField, structType));
            }
        }
    }

    public StructProjection wrap(StructLike newStruct) {
        this.struct = newStruct;
        return this;
    }

    @Override
    public int size() {
        return type.fields().size();
    }

    @Override
    public <T> T get(int pos, Class<T> javaClass) {
        if (nestedProjections[pos] != null) {
            return javaClass.cast(nestedProjections[pos].wrap(struct.get(positionMap[pos], StructLike.class)));
        }
        return struct.get(positionMap[pos], javaClass);
    }

    @Override
    public <T> void set(int pos, T value) {
        throw new UnsupportedOperationException("Cannot set fields in a TypeProjection");
    }
}
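
A brief usage sketch (the schema and rows variables and the field ids are hypothetical): project a subset of columns by reusing one wrapper across rows:

// Hypothetical: project the fields with ids 1 and 3 out of full-schema rows.
StructProjection projection = StructProjection.create(schema, Sets.newHashSet(1, 3));
for (StructLike row : rows) {
    StructLike projected = projection.wrap(row); // reused wrapper; copy values that must outlive the loop
    // fields are addressed by projected position: projected.get(0, ...), projected.get(1, ...)
}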

18 Source : BoundPredicate.java
with Apache License 2.0
from apache

@Override
public Boolean eval(StructLike struct) {
    return test(term().eval(struct));
}

17 Source : PartitionTable.java
with Apache License 2.0
from trinodb

private Map<StructLikeWrapper, Partition> getPartitions(TableScan tableScan) {
    try (CloseableIterable<FileScanTask> fileScanTasks = tableScan.planFiles()) {
        Map<StructLikeWrapper, Partition> partitions = new HashMap<>();
        for (FileScanTask fileScanTask : fileScanTasks) {
            DataFile dataFile = fileScanTask.file();
            Types.StructType structType = fileScanTask.spec().partitionType();
            StructLike partitionStruct = dataFile.partition();
            StructLikeWrapper partitionWrapper = StructLikeWrapper.forType(structType).set(partitionStruct);
            if (!partitions.containsKey(partitionWrapper)) {
                Partition partition = new Partition(idToTypeMapping, nonPartitionPrimitiveColumns, partitionStruct, dataFile.recordCount(), dataFile.fileSizeInBytes(), toMap(dataFile.lowerBounds()), toMap(dataFile.upperBounds()), dataFile.nullValueCounts(), dataFile.columnSizes());
                partitions.put(partitionWrapper, partition);
                continue;
            }
            Partition partition = partitions.get(partitionWrapper);
            partition.incrementFileCount();
            partition.incrementRecordCount(dataFile.recordCount());
            partition.incrementSize(dataFile.fileSizeInBytes());
            partition.updateMin(toMap(dataFile.lowerBounds()), dataFile.nullValueCounts(), dataFile.recordCount());
            partition.updateMax(toMap(dataFile.upperBounds()), dataFile.nullValueCounts(), dataFile.recordCount());
            partition.updateNullCount(dataFile.nullValueCounts());
        }
        return partitions;
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}

17 Source : IcebergUtil.java
with Apache License 2.0
from trinodb

public static Map<Integer, String> getPartitionKeys(FileScanTask scanTask) {
    StructLike partition = scanTask.file().partition();
    PartitionSpec spec = scanTask.spec();
    Map<PartitionField, Integer> fieldToIndex = getIdentityPartitions(spec);
    Map<Integer, String> partitionKeys = new HashMap<>();
    fieldToIndex.forEach((field, index) -> {
        int id = field.sourceId();
        org.apache.iceberg.types.Type type = spec.schema().findType(id);
        Class<?> javaClass = type.typeId().javaClass();
        Object value = partition.get(index, javaClass);
        if (value == null) {
            partitionKeys.put(id, null);
        } else {
            String partitionValue;
            if (type.typeId() == FIXED || type.typeId() == BINARY) {
                // this is safe because Iceberg PartitionData directly wraps the byte array
                partitionValue = new String(((ByteBuffer) value).array(), UTF_8);
            } else {
                partitionValue = value.toString();
            }
            partitionKeys.put(id, partitionValue);
        }
    });
    return Collections.unmodifiableMap(partitionKeys);
}

17 Source : FixedIcebergWriterPool.java
with Apache License 2.0
from Netflix

@Override
public void open(StructLike partition) throws IOException {
    if (pool.size() >= maximumPoolSize) {
        throw new IOException("problem opening writer; maximum writer pool size (" + maximumPoolSize + ") exceeded");
    }
    if (!isClosed(partition)) {
        return;
    }
    IcebergWriter writer = factory.newIcebergWriter();
    writer.open(partition);
    pool.put(partition, writer);
}

17 Source : ManifestScanTableFunction.java
with Apache License 2.0
from dremio

private List<SplitAndPartitionInfo> getSplitsFromDataFile(DataFile dataFile, int maxOutputCount) {
    PartitionProtobuf.NormalizedPartitionInfo.Builder partitionInfoBuilder = PartitionProtobuf.NormalizedPartitionInfo.newBuilder();
    partitionInfoBuilder.setId(String.valueOf(1));
    // get table partition spec
    StructLike partitionStruct = dataFile.partition();
    for (int partColPos = 0; partColPos < partitionStruct.size(); ++partColPos) {
        PartitionProtobuf.PartitionValue.Builder partitionValueBuilder = PartitionProtobuf.PartitionValue.newBuilder();
        partitionValueBuilder.setColumn(partitionCols.get(partColPos));
        writePartitionValue(partitionValueBuilder, partitionStruct.get(partColPos, getPartitionColumnClass(partColPos)));
        partitionInfoBuilder.addValues(partitionValueBuilder.build());
    }
    PartitionProtobuf.NormalizedPartitionInfo partitionInfo = partitionInfoBuilder.build();
    List<SplitAndPartitionInfo> splits = new ArrayList<>();
    int splitCount = 0;
    while (splitCount < maxOutputCount && currentOffset < dataFile.fileSizeInBytes()) {
        long curBlockSize = Math.min(targetSplitSize, dataFile.fileSizeInBytes() - currentOffset);
        ParquetProtobuf.ParquetBlockBasedSplitXAttr splitExtended = ParquetProtobuf.ParquetBlockBasedSplitXAttr.newBuilder().setPath(dataFile.path().toString()).setStart(currentOffset).setLength(curBlockSize).setFileLength(dataFile.fileSizeInBytes()).setLastModificationTime(1) // todo: set correct file modification time
                .build();
        currentOffset += curBlockSize;
        PartitionProtobuf.NormalizedDatasetSplitInfo.Builder splitInfo = PartitionProtobuf.NormalizedDatasetSplitInfo.newBuilder().setPartitionId(partitionInfo.getId()).setExtendedProperty(splitExtended.toByteString());
        splits.add(new SplitAndPartitionInfo(partitionInfo, splitInfo.build()));
        splitCount++;
        splitCount++;
    }
    return splits;
}

17 Source : FileHelpers.java
with Apache License 2.0
from apache

public static DeleteFile writeDeleteFile(Table table, OutputFile out, StructLike partition, List<Record> deletes, Schema deleteRowSchema) throws IOException {
    EqualityDeleteWriter<Record> writer = Parquet.writeDeletes(out).forTable(table).withPartition(partition).rowSchema(deleteRowSchema).createWriterFunc(GenericParquetWriter::buildWriter).overwrite().equalityFieldIds(deleteRowSchema.columns().stream().mapToInt(Types.NestedField::fieldId).toArray()).buildEqualityWriter();
    try (Closeable toClose = writer) {
        writer.deleteAll(deletes);
    }
    return writer.toDeleteFile();
}

17 Source : FileHelpers.java
with Apache License 2.0
from apache

public static DataFile writeDataFile(Table table, OutputFile out, StructLike partition, List<Record> rows) throws IOException {
    FileAppender<Record> writer = Parquet.write(out).createWriterFunc(GenericParquetWriter::buildWriter).schema(table.schema()).overwrite().build();
    try (Closeable toClose = writer) {
        writer.addAll(rows);
    }
    return DataFiles.builder(table.spec()).withFormat(FileFormat.PARQUET).withPath(out.location()).withPartition(partition).withFileSizeInBytes(writer.length()).withSplitOffsets(writer.splitOffsets()).withMetrics(writer.metrics()).build();
}

17 Source : GenericAppenderFactory.java
with Apache License 2.0
from apache

@Override
public EqualityDeleteWriter<Record> newEqDeleteWriter(EncryptedOutputFile file, FileFormat format, StructLike partition) {
    Preconditions.checkState(equalityFieldIds != null && equalityFieldIds.length > 0, "Equality field ids shouldn't be null or empty when creating equality-delete writer");
    Preconditions.checkNotNull(eqDeleteRowSchema, "Equality delete row schema shouldn't be null when creating equality-delete writer");
    MetricsConfig metricsConfig = MetricsConfig.fromProperties(config);
    try {
        switch(format) {
            case AVRO:
                return Avro.writeDeletes(file.encryptingOutputFile()).createWriterFunc(DataWriter::create).withPartition(partition).overwrite().setAll(config).rowSchema(eqDeleteRowSchema).withSpec(spec).withKeyMetadata(file.keyMetadata()).equalityFieldIds(equalityFieldIds).buildEqualityWriter();
            case PARQUET:
                return Parquet.writeDeletes(file.encryptingOutputFile()).createWriterFunc(GenericParquetWriter::buildWriter).withPartition(partition).overwrite().setAll(config).metricsConfig(metricsConfig).rowSchema(eqDeleteRowSchema).withSpec(spec).withKeyMetadata(file.keyMetadata()).equalityFieldIds(equalityFieldIds).buildEqualityWriter();
            default:
                throw new UnsupportedOperationException("Cannot write equality-deletes for unsupported file format: " + format);
        }
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}

17 Source : GenericAppenderFactory.java
with Apache License 2.0
from apache

@Override
public PositionDeleteWriter<Record> newPosDeleteWriter(EncryptedOutputFile file, FileFormat format, StructLike partition) {
    MetricsConfig metricsConfig = MetricsConfig.fromProperties(config);
    try {
        switch(format) {
            case AVRO:
                return Avro.writeDeletes(file.encryptingOutputFile()).createWriterFunc(DataWriter::create).withPartition(partition).overwrite().setAll(config).rowSchema(posDeleteRowSchema).withSpec(spec).withKeyMetadata(file.keyMetadata()).buildPositionWriter();
            case PARQUET:
                return Parquet.writeDeletes(file.encryptingOutputFile()).createWriterFunc(GenericParquetWriter::buildWriter).withPartition(partition).overwrite().setAll(config).metricsConfig(metricsConfig).rowSchema(posDeleteRowSchema).withSpec(spec).withKeyMetadata(file.keyMetadata()).buildPositionWriter();
            default:
                throw new UnsupportedOperationException("Cannot write pos-deletes for unsupported file format: " + format);
        }
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}

17 Source : StructLikeWrapper.java
with Apache License 2.0
from apache

/**
 * Wrapper to adapt StructLike for use in maps and sets by implementing equals and hashCode.
 */
public class StructLikeWrapper {

    public static StructLikeWrapper forType(Types.StructType struct) {
        return new StructLikeWrapper(struct);
    }

    private final Comparator<StructLike> comparator;

    private final JavaHash<StructLike> structHash;

    private Integer hashCode;

    private StructLike struct;

    private StructLikeWrapper(Types.StructType type) {
        this.comparator = Comparators.forType(type);
        this.structHash = JavaHash.forType(type);
        this.hashCode = null;
    }

    public StructLikeWrapper set(StructLike newStruct) {
        this.struct = newStruct;
        this.hashCode = null;
        return this;
    }

    public StructLike get() {
        return struct;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        } else if (!(other instanceof StructLikeWrapper)) {
            return false;
        }
        StructLikeWrapper that = (StructLikeWrapper) other;
        if (this.struct == that.struct) {
            return true;
        }
        if (this.struct == null ^ that.struct == null) {
            return false;
        }
        return comparator.compare(this.struct, that.struct) == 0;
    }

    @Override
    public int hashCode() {
        if (hashCode == null) {
            this.hashCode = structHash.hash(struct);
        }
        return hashCode;
    }
}
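
A small usage sketch (the partitionType and dataFiles variables are assumed given): group data files by partition value, relying on the wrapper's equals/hashCode, much like the PartitionTable example earlier:

// Hypothetical grouping; use a fresh wrapper per key, since set() mutates the wrapper in place.
Map<StructLikeWrapper, List<DataFile>> byPartition = new HashMap<>();
for (DataFile file : dataFiles) {
    StructLikeWrapper key = StructLikeWrapper.forType(partitionType).set(file.partition());
    byPartition.computeIfAbsent(key, k -> new ArrayList<>()).add(file);
}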

17 Source : PartitionSet.java
with Apache License 2.0
from apache

public boolean add(int specId, StructLike struct) {
    Set<StructLike> partitionSet = partitionSetById.computeIfAbsent(specId, id -> StructLikeSet.create(partitionTypeById.get(id)));
    return partitionSet.add(struct);
}

17 Source : ManifestFileUtil.java
with Apache License 2.0
from apache

public static boolean canContainAny(ManifestFile manifest, Iterable<StructLike> partitions, Function<Integer, PartitionSpec> specLookup) {
    if (manifest.partitions() == null) {
        return true;
    }
    List<FieldSummary<?>> summaries = summaries(manifest, specLookup);
    for (StructLike partition : partitions) {
        if (canContain(summaries, partition)) {
            return true;
        }
    }
    return false;
}

17 Source : EqualityDeleteWriter.java
with Apache License 2.0
from apache

public class EqualityDeleteWriter<T> implements Closeable {

    private final FileAppender<T> appender;

    private final FileFormat format;

    private final String location;

    private final PartitionSpec spec;

    private final StructLike partition;

    private final ByteBuffer keyMetadata;

    private final int[] equalityFieldIds;

    private DeleteFile deleteFile = null;

    public EqualityDeleteWriter(FileAppender<T> appender, FileFormat format, String location, PartitionSpec spec, StructLike partition, EncryptionKeyMetadata keyMetadata, int... equalityFieldIds) {
        this.appender = appender;
        this.format = format;
        this.location = location;
        this.spec = spec;
        this.partition = partition;
        this.keyMetadata = keyMetadata != null ? keyMetadata.buffer() : null;
        this.equalityFieldIds = equalityFieldIds;
    }

    public void deleteAll(Iterable<T> rows) {
        appender.addAll(rows);
    }

    public void delete(T row) {
        appender.add(row);
    }

    public long length() {
        return appender.length();
    }

    @Override
    public void close() throws IOException {
        if (deleteFile == null) {
            appender.close();
            this.deleteFile = FileMetadata.deleteFileBuilder(spec).ofEqualityDeletes(equalityFieldIds).withFormat(format).withPath(location).withPartition(partition).withEncryptionKeyMetadata(keyMetadata).withFileSizeInBytes(appender.length()).withMetrics(appender.metrics()).build();
        }
    }

    public DeleteFile toDeleteFile() {
        Preconditions.checkState(deleteFile != null, "Cannot create delete file from unclosed writer");
        return deleteFile;
    }
}
