org.apache.hadoop.hbase.client.Connection.getTable()

Here are examples of the Java API org.apache.hadoop.hbase.client.Connection.getTable() taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

202 Examples
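Before the project examples, here is a minimal, self-contained sketch of the typical getTable() usage pattern. It is only an illustration: the class name GetTableSketch, the table name "my_table", the column family "cf", the qualifier "q", and the row key "row1" are hypothetical placeholders, while the client calls themselves (ConnectionFactory.createConnection, Connection.getTable, Table.put, Table.get) are the standard HBase client API.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetTableSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Connection is heavyweight and thread-safe; Table instances are lightweight,
        // not thread-safe, and should be obtained per use and closed afterwards.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("my_table"))) {
            // Write one cell
            Put put = new Put(Bytes.toBytes("row1"));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
            table.put(put);
            // Read it back
            Result result = table.get(new Get(Bytes.toBytes("row1")));
            System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
        }
    }
}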

19 Source : CheckPoint.java
with GNU Lesser General Public License v3.0
from waterguo

public void readFromHBase(Connection conn) throws IOException {
    // Get table object
    Table table = conn.getTable(this.tn);
    // Query row
    Get get = new Get(KEY);
    Result result = table.get(get);
    if (!result.isEmpty()) {
        this.currentSp = Bytes.toLong(get(result, "currentSp"));
        this.serverId = Bytes.toLong(get(result, "serverId"));
        this.createTimestamp = Optional.ofNullable(get(result, "createTimestamp")).map(Bytes::toLong).orElse(0l);
        this.updateTimestamp = Optional.ofNullable(get(result, "updateTimestamp")).map(Bytes::toLong).orElse(0l);
        this.createOrcaVersion = Bytes.toString(get(result, "createOrcaVersion"));
        this.updateorcaVersion = Bytes.toString(get(result, "updateOrcaVersion"));
        this.isActive = Optional.ofNullable(get(result, "isActive")).map(Bytes::toBoolean).orElse(Boolean.FALSE);
    }
}

19 Source : CheckPoint.java
with GNU Lesser General Public License v3.0
from waterguo

/**
 * save changes to hbase
 * @throws IOException
 */
public void updateHBase(Connection conn) throws IOException {
    if (!this.isMutable) {
        throw new OrcaHBaseException("failed to update since it is in read-only mode");
    }
    // Get table object
    Table table = conn.getTable(tn);
    // Generate put data
    Put put = new Put(KEY);
    set(put, "currentSp", this.currentSp);
    set(put, "serverId", this.serverId);
    if (this.createTimestamp == 0l) {
        this.createTimestamp = System.currentTimeMillis();
    }
    this.updateTimestamp = System.currentTimeMillis();
    if (this.createOrcaVersion == null) {
        this.createOrcaVersion = Orca._version;
    }
    this.updateorcaVersion = Orca._version;
    set(put, "createTimestamp", this.createTimestamp);
    set(put, "updateTimestamp", this.updateTimestamp);
    set(put, "createOrcaVersion", this.createOrcaVersion);
    set(put, "updateOrcaVersion", this.updateorcaVersion);
    set(put, "isActive", this.isActive);
    // put row
    table.put(put);
}

19 Source : DropIndexTest.java
with Apache License 2.0
from rayokota

@Test
public void testDropEdgeIndex() throws Exception {
    assertEquals(0, count(graph.vertices()));
    graph.createIndex(ElementType.EDGE, "b", "key1");
    graph.createIndex(ElementType.EDGE, "b", "key2");
    Vertex v0 = graph.addVertex(T.id, id(0));
    Vertex v1 = graph.addVertex(T.id, id(1));
    Vertex v2 = graph.addVertex(T.id, id(2));
    Vertex v3 = graph.addVertex(T.id, id(3));
    Vertex v4 = graph.addVertex(T.id, id(4));
    v0.addEdge("b", v1, "key1", 1);
    v0.addEdge("b", v2, "key1", 2);
    v0.addEdge("b", v3, "key2", 3);
    v0.addEdge("a", v1, "key1", 1);
    v0.addEdge("b", v4, "key1", 4);
    HBaseGraphConfiguration hconf = graph.configuration();
    Configuration conf = hconf.toHBaseConfiguration();
    Connection conn = graph.connection();
    Table table = conn.getTable(HBaseGraphUtils.getTableName(hconf, Constants.EDGE_INDICES));
    // 5 edge endpoints and 4 indices
    verifyTableCount(table, 5 * 2 + 4 * 2);
    runDropIndex(conf, new String[] { "-t", "edge", "-l", "b", "-p", "key1", "-d", "true", "-rf", "true", "-op", "/tmp" });
    // 5 edge endpoints and 1 index
    verifyTableCount(table, 5 * 2 + 1 * 2);
    table.close();
}

19 Source : DropIndexTest.java
with Apache License 2.0
from rayokota

@Test
public void testDropVertexIndex() throws Exception {
    assertEquals(0, count(graph.vertices()));
    graph.createIndex(ElementType.VERTEX, "a", "key1");
    graph.createIndex(ElementType.VERTEX, "b", "key1");
    graph.addVertex(T.id, id(10), T.label, "a", "key1", 11);
    graph.addVertex(T.id, id(11), T.label, "a", "key1", 12);
    graph.addVertex(T.id, id(12), T.label, "a", "key2", 12);
    graph.addVertex(T.id, id(13), T.label, "a", "key1", 11);
    graph.addVertex(T.id, id(14), T.label, "b", "key1", 11);
    HBaseGraphConfiguration hconf = graph.configuration();
    Configuration conf = hconf.toHBaseConfiguration();
    Connection conn = graph.connection();
    Table table = conn.getTable(HBaseGraphUtils.getTableName(hconf, Constants.VERTEX_INDICES));
    verifyTableCount(table, 4);
    runDropIndex(conf, new String[] { "-t", "vertex", "-l", "a", "-p", "key1", "-d", "true", "-rf", "true", "-op", "/tmp" });
    verifyTableCount(table, 1);
    table.close();
}

19 Source : HBaseIndexTest.java
with Apache License 2.0
from rayokota

@Test
public void testPopulateEdgeIndex() throws Exception {
    assertEquals(0, count(graph.vertices()));
    Vertex v0 = graph.addVertex(T.id, id(0));
    Vertex v1 = graph.addVertex(T.id, id(1));
    Vertex v2 = graph.addVertex(T.id, id(2));
    Vertex v3 = graph.addVertex(T.id, id(3));
    Vertex v4 = graph.addVertex(T.id, id(4));
    v0.addEdge("b", v1, "key1", 1);
    v0.addEdge("b", v2, "key1", 2);
    v0.addEdge("b", v3, "key2", 3);
    v0.addEdge("a", v1, "key1", 1);
    v0.addEdge("b", v4, "key1", 4);
    HBaseGraphConfiguration hconf = graph.configuration();
    Connection conn = graph.connection();
    Table table = conn.getTable(HBaseGraphUtils.getTableName(hconf, Constants.EDGE_INDICES));
    // 5 edge endpoints
    verifyTableCount(table, 5 * 2);
    graph.createIndex(ElementType.EDGE, "b", "key1", false, true, false);
    // 5 edge endpoints and 3 indices
    verifyTableCount(table, 5 * 2 + 3 * 2);
    table.close();
}

19 Source : HBaseIndexTest.java
with Apache License 2.0
from rayokota

@Test
public void testPopulateVertexIndex() throws Exception {
    assertEquals(0, count(graph.vertices()));
    graph.addVertex(T.id, id(10), T.label, "a", "key1", 11);
    graph.addVertex(T.id, id(11), T.label, "a", "key1", 12);
    graph.addVertex(T.id, id(12), T.label, "a", "key2", 12);
    graph.addVertex(T.id, id(13), T.label, "a", "key1", 11);
    graph.addVertex(T.id, id(14), T.label, "b", "key1", 11);
    HBaseGraphConfiguration hconf = graph.configuration();
    Connection conn = graph.connection();
    Table table = conn.getTable(HBaseGraphUtils.getTableName(hconf, Constants.VERTEX_INDICES));
    graph.createIndex(ElementType.VERTEX, "a", "key1", false, true, false);
    verifyTableCount(table, 3);
    table.close();
}

19 Source : MockBufferedMutator.java
with Apache License 2.0
from rayokota

/**
 * Executes all the buffered, asynchronous {@link Mutation} operations and waits until they
 * are done.
 *
 * @throws IOException if a remote or network exception occurs.
 */
@Override
public void flush() throws IOException {
    // noinspection EmptyCatchBlock
    try {
        if (conn != null) {
            Object[] results = new Object[mutations.size()];
            conn.getTable(name).batch(mutations, results);
        }
        mutations.clear();
    } catch (InterruptedException e) {
    }
}

19 Source : HBasePageSink.java
with Apache License 2.0
from openlookeng

@Override
public CompletableFuture<?> appendPage(Page page) {
    // For each position within the page
    List<Put> puts = new ArrayList<>();
    try (Connection connection = hbaseConn.createConnection();
        Table table = connection.getTable(TableName.valueOf(tablename))) {
        for (int position = 0; position < page.getPositionCount(); ++position) {
            // Convert Page to a Put, writing and indexing it
            Put put = pageToPut(page, position);
            puts.add(put);
            if (puts.size() >= Constants.PUT_BATCH_SIZE) {
                table.put(puts);
                puts.clear();
            }
        }
        if (!puts.isEmpty()) {
            table.put(puts);
        }
    } catch (IOException e) {
        LOG.error("appendPage PUT rejected by server... cause by %s", e.getMessage());
        throw new PrestoException(HBaseErrorCode.UNEXPECTED_HBASE_ERROR, "Insert into table error", e);
    }
    return NOT_BLOCKED;
}

19 Source : CamelliaHBaseConnection.java
with MIT License
from netease-im

public Table getTable(String tableName) {
    try {
        return connection.getTable(TableName.valueOf(tableName));
    } catch (IOException e) {
        logger.error("getTable error, hbaseResource = {}, tableName = {}", hBaseResource.getUrl(), tableName, e);
        onException();
        throw new CamelliaHBaseException("get table = " + tableName + " error", e);
    }
}

19 Source : Data2HBase1.java
with GNU General Public License v3.0
from monsonlee

/**
 * Batch import
 *
 * @param connection
 * @throws IOException
 */
private static void batchRowImport(Connection connection) throws IOException {
    Table table = connection.getTable(TableName.valueOf("t3"));
    byte[] columnFamily = "f1".getBytes();
    long startTime = System.currentTimeMillis();
    ArrayList<Put> puts = new ArrayList<Put>();
    for (int i = 0; i < 99999; i++) {
        puts.add(HBaseUtil.createPut(i + "", columnFamily, "c1", i + ""));
        // Flush the batch every 10,000 rows
        if (i % 10000 == 0) {
            table.put(puts);
            puts.clear();
        }
    }
    table.put(puts);
    table.close();
    System.out.println("共耗时:" + (System.currentTimeMillis() - startTime) + "ms");
}

19 Source : Data2HBase1.java
with GNU General Public License v3.0
from monsonlee

/**
 * Single-row import
 *
 * @param connection
 * @throws IOException
 */
private static void singleRowImport(Connection connection) throws IOException {
    Table table = connection.getTable(TableName.valueOf("t3"));
    byte[] columnFamily = "f1".getBytes();
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < 99999; i++) {
        table.put(HBaseUtil.createPut(i + "", columnFamily, "c1", i + ""));
    }
    table.close();
    System.out.println("共耗时:" + (System.currentTimeMillis() - startTime) + "ms");
}

19 Source : RowCounterCLI.java
with Apache License 2.0
from Kyligence

public static void main(String[] args) throws IOException {
    if (args == null || args.length != 3) {
        logger.info("Usage: hbase org.apache.hadoop.util.RunJar kylin-job-latest.jar org.apache.kylin.job.tools.RowCounterCLI [HTABLE_NAME] [STARTKEY] [ENDKEY]");
        // if not enough arguments are provided, return with the above message
        return;
    }
    logger.info(args[0]);
    String htableName = args[0];
    logger.info(args[1]);
    byte[] startKey = BytesUtil.fromReadableText(args[1]);
    logger.info(args[2]);
    byte[] endKey = BytesUtil.fromReadableText(args[2]);
    if (startKey == null) {
        logger.info("startkey is null ");
    } else {
        logger.info("startkey lenght: {}", startKey.length);
    }
    if (logger.isInfoEnabled()) {
        logger.info("start key in binary: {}", Bytes.toStringBinary(startKey));
        logger.info("end key in binary: {}", Bytes.toStringBinary(endKey));
    }
    Configuration conf = HBaseConnection.getCurrentHBaseConfiguration();
    Scan scan = new Scan();
    scan.setCaching(512);
    scan.setCacheBlocks(true);
    scan.setStartRow(startKey);
    scan.setStopRow(endKey);
    logger.info("My Scan {}", scan);
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table tableInterface = conn.getTable(TableName.valueOf(htableName))) {
        Iterator<Result> iterator = tableInterface.getScanner(scan).iterator();
        int counter = 0;
        while (iterator.hasNext()) {
            iterator.next();
            counter++;
            if (counter % 1000 == 1) {
                logger.info("number of rows: {}", counter);
            }
        }
        logger.info("number of rows: {}", counter);
    }
}

19 Source : HBaseAccessorTest.java
with Apache License 2.0
from greenplum-db

/*
 * Helper for test setup.
 * Adds a table name and prepares for table creation.
 */
private void prepareTableOpen() throws Exception {
    // Set table name
    context.setDataSource(tableName);
    hbaseConfiguration = mock(Configuration.class);
    when(HBaseConfiguration.create()).thenReturn(hbaseConfiguration);
    // Make sure we mock static functions in ConnectionFactory
    hbaseConnection = mock(Connection.class);
    when(ConnectionFactory.createConnection(hbaseConfiguration)).thenReturn(hbaseConnection);
    table = mock(Table.class);
    when(hbaseConnection.getTable(TableName.valueOf(tableName))).thenReturn(table);
}

19 Source : TestReplicationEndpoint.java
with Apache License 2.0
from fengchen8086

private void doPut(final Connection connection, final byte[] row) throws IOException {
    try (Table t = connection.getTable(tableName)) {
        Put put = new Put(row);
        put.add(famName, row, row);
        t.put(put);
    }
}

19 Source : QuotaUtil.java
with Apache License 2.0
from fengchen8086

private static void doDelete(final Connection connection, final Delete delete) throws IOException {
    try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
        table.delete(delete);
    }
}

19 Source : QuotaUtil.java
with Apache License 2.0
from fengchen8086

/*
 * =========================================================================
 * HTable helpers
 */
private static void doPut(final Connection connection, final Put put) throws IOException {
    try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
        table.put(put);
    }
}

19 Source : VisibilityClient.java
with Apache License 2.0
from fengchen8086

/**
 * @param connection the Connection instance to use.
 * @param user
 * @return labels, the given user is globally authorized for.
 * @throws Throwable
 */
public static GetAuthsResponse getAuths(Connection connection, final String user) throws Throwable {
    try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
        Batch.Call<VisibilityLabelsService, GetAuthsResponse> callable = new Batch.Call<VisibilityLabelsService, GetAuthsResponse>() {

            ServerRpcController controller = new ServerRpcController();

            BlockingRpcCallback<GetAuthsResponse> rpcCallback = new BlockingRpcCallback<GetAuthsResponse>();

            public GetAuthsResponse call(VisibilityLabelsService service) throws IOException {
                GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder();
                getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user)));
                service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback);
                GetAuthsResponse response = rpcCallback.get();
                if (controller.failedOnException()) {
                    throw controller.getFailedOn();
                }
                return response;
            }
        };
        Map<byte[], GetAuthsResponse> result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
        // There will be exactly one region for the labels table
        // and so one entry in the result Map.
        return result.values().iterator().next();
    }
}

19 Source : VisibilityClient.java
with Apache License 2.0
from fengchen8086

private static VisibilityLabelsResponse setOrClearAuths(Connection connection, final String[] auths, final String user, final boolean setOrClear) throws IOException, ServiceException, Throwable {
    try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
        Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable = new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() {

            ServerRpcController controller = new ServerRpcController();

            BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback = new BlockingRpcCallback<VisibilityLabelsResponse>();

            public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException {
                SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder();
                setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user)));
                for (String auth : auths) {
                    if (auth.length() > 0) {
                        setAuthReqBuilder.addAuth(ByteStringer.wrap(Bytes.toBytes(auth)));
                    }
                }
                if (setOrClear) {
                    service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback);
                } else {
                    service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback);
                }
                VisibilityLabelsResponse response = rpcCallback.get();
                if (controller.failedOnException()) {
                    throw controller.getFailedOn();
                }
                return response;
            }
        };
        Map<byte[], VisibilityLabelsResponse> result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
        // There will be exactly one region for the labels table
        // and so one entry in the result Map.
        return result.values().iterator().next();
    }
}

19 Source : VisibilityClient.java
with Apache License 2.0
from fengchen8086

/**
 * Utility method for adding labels to the system.
 *
 * @param connection
 * @param labels
 * @return VisibilityLabelsResponse
 * @throws Throwable
 */
public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels) throws Throwable {
    try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
        Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable = new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() {

            ServerRpcController controller = new ServerRpcController();

            BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback = new BlockingRpcCallback<VisibilityLabelsResponse>();

            public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException {
                VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder();
                for (String label : labels) {
                    if (label.length() > 0) {
                        VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder();
                        newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label)));
                        builder.addVisLabel(newBuilder.build());
                    }
                }
                service.addLabels(controller, builder.build(), rpcCallback);
                VisibilityLabelsResponse response = rpcCallback.get();
                if (controller.failedOnException()) {
                    throw controller.getFailedOn();
                }
                return response;
            }
        };
        Map<byte[], VisibilityLabelsResponse> result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
        // There will be exactly one region for the labels table
        // and so one entry in the result Map.
        return result.values().iterator().next();
    }
}

19 Source : QuotaTableUtil.java
with Apache License 2.0
from fengchen8086

protected static Result[] doGet(final Connection connection, final List<Get> gets) throws IOException {
    try (Table table = connection.getTable(QUOTA_TABLE_NAME)) {
        return table.get(gets);
    }
}

19 Source : QuotaTableUtil.java
with Apache License 2.0
from fengchen8086

/*
 * =========================================================================
 * HTable helpers
 */
protected static Result doGet(final Connection connection, final Get get) throws IOException {
    try (Table table = connection.getTable(QUOTA_TABLE_NAME)) {
        return table.get(get);
    }
}

19 Source : AggregationClient.java
with Apache License 2.0
from fengchen8086

/**
 * It sums up the value returned from various regions. In case qualifier is
 * null, summation of all the column qualifiers in the given family is done.
 * @param tableName
 * @param ci
 * @param scan
 * @return sum <S>
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> S sum(final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    try (Table table = connection.getTable(tableName)) {
        return sum(table, ci, scan);
    }
}

19 Source : AggregationClient.java
with Apache License 2.0
from fengchen8086

/**
 * This is the client side interface/handler for calling the median method for a
 * given cf-cq combination. This method collects the necessary parameters
 * to compute the median and returns the median.
 * @param tableName
 * @param ci
 * @param scan
 * @return R the median
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> R median(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) throws Throwable {
    try (Table table = connection.getTable(tableName)) {
        return median(table, ci, scan);
    }
}

19 Source : AggregationClient.java
with Apache License 2.0
from fengchen8086

/**
 * This is the client side interface/handle for calling the std method for a
 * given cf-cq combination. It was necessary to add one more call stack as its
 * return type should be a decimal value, irrespective of what
 * columninterpreter says. So, this methods collects the necessary parameters
 * to compute the std and returns the double value.
 * @param tableName
 * @param ci
 * @param scan
 * @return <R, S>
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> double std(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) throws Throwable {
    try (Table table = connection.getTable(tableName)) {
        return std(table, ci, scan);
    }
}

19 Source : AggregationClient.java
with Apache License 2.0
from fengchen8086

/**
 * It gives the maximum value of a column for a given column family for the
 * given range. In case qualifier is null, a max of all values for the given
 * family is returned.
 * @param tableName
 * @param ci
 * @param scan
 * @return max val <R>
 * @throws Throwable
 *           The caller is supposed to handle the exception as they are thrown
 *           & propagated to it.
 */
public <R, S, P extends Message, Q extends Message, T extends Message> R max(final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    try (Table table = connection.getTable(tableName)) {
        return max(table, ci, scan);
    }
}

19 Source : AggregationClient.java
with Apache License 2.0
from fengchen8086

/**
 * It computes average while fetching sum and row count from all the
 * corresponding regions. Approach is to compute a global sum of region level
 * sum and rowcount and then compute the average.
 * @param tableName
 * @param scan
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    try (Table table = connection.getTable(tableName)) {
        return getAvgArgs(table, ci, scan);
    }
}

19 Source : AggregationClient.java
with Apache License 2.0
from fengchen8086

/**
 * It gives the minimum value of a column for a given column family for the
 * given range. In case qualifier is null, a min of all values for the given
 * family is returned.
 * @param tableName
 * @param ci
 * @param scan
 * @return min val <R>
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> R min(final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    try (Table table = connection.getTable(tableName)) {
        return min(table, ci, scan);
    }
}

19 Source : AggregationClient.java
with Apache License 2.0
from fengchen8086

/**
 * It gives the row count, by summing up the individual results obtained from
 * regions. In case the qualifier is null, FirstKeyValueFilter is used to
 * optimise the operation. In case qualifier is provided, I can't use the
 * filter as it may set the flag to skip to next row, but the value read is
 * not of the given filter: in this case, this particular row will not be
 * counted ==> an error.
 * @param tableName
 * @param ci
 * @param scan
 * @return <R, S>
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    try (Table table = connection.getTable(tableName)) {
        return rowCount(table, ci, scan);
    }
}

19 Source : HbaseTableFactory.java
with Apache License 2.0
from bbossgroups

@Override
public Table getTable(TableName tableName, ExecutorService executorService) {
    try {
        return connection.getTable(tableName, executorService);
    } catch (IOException e) {
        throw new HbaseSystemException(e);
    }
}

19 Source : HbaseTableFactory.java
with Apache License 2.0
from bbossgroups

@Override
public Table getTable(TableName tableName) {
    try {
        return connection.getTable(tableName);
    } catch (IOException e) {
        throw new HbaseSystemException(e);
    }
}

19 Source : HBaseConnectionTest.java
with Apache License 2.0
from awslabs

@Test
public void scanTableWithCallerException() throws IOException {
    logger.info("scanTable: enter");
    when(mockConnection.getTable(any(org.apache.hadoop.hbase.TableName.class))).thenReturn(mockTable);
    when(mockTable.getScanner(any(Scan.class))).thenReturn(mockScanner);
    TableName tableName = org.apache.hadoop.hbase.TableName.valueOf("schema1", "table1");
    try {
        connection.scanTable(tableName, mock(Scan.class), (ResultScanner scanner) -> {
            throw new RuntimeException("Do not retry!");
        });
    } catch (RuntimeException ex) {
        assertTrue(UnrecoverableException.class.equals(ex.getCause().getClass()));
        logger.info("listTableNamesByNamespaceRetryExhausted: Encountered expected exception.", ex);
    }
    assertTrue(connection.isHealthy());
    assertEquals(0, connection.getRetries());
    verify(mockConnection, atLeastOnce()).getTable(anyObject());
    verify(mockTable, atLeastOnce()).getScanner(any(Scan.class));
    logger.info("scanTable: exit");
}

19 Source : HBaseConnectionTest.java
with Apache License 2.0
from awslabs

@Test
public void scanTable() throws IOException {
    logger.info("scanTable: enter");
    when(mockConnection.getTable(any(org.apache.hadoop.hbase.TableName.class))).thenReturn(mockTable);
    when(mockTable.getScanner(any(Scan.class))).thenReturn(mockScanner);
    TableName tableName = org.apache.hadoop.hbase.TableName.valueOf("schema1", "table1");
    boolean result = connection.scanTable(tableName, mock(Scan.class), (ResultScanner scanner) -> scanner != null);
    assertTrue(result);
    assertTrue(connection.isHealthy());
    assertEquals(0, connection.getRetries());
    verify(mockConnection, atLeastOnce()).getTable(anyObject());
    verify(mockTable, atLeastOnce()).getScanner(any(Scan.class));
    logger.info("scanTable: exit");
}

19 Source : HBaseConnectionTest.java
with Apache License 2.0
from awslabs

@Test
public void scanTableWithRetry() throws IOException {
    logger.info("scanTableWithRetry: enter");
    when(mockConnection.getTable(any(org.apache.hadoop.hbase.TableName.class))).thenReturn(mockTable);
    when(mockTable.getScanner(any(Scan.class))).thenAnswer(new Answer() {

        private int count = 0;

        public Object answer(InvocationOnMock invocation) {
            if (++count == 1) {
                // first invocation should throw
                throw new RuntimeException("Retryable");
            }
            return mockScanner;
        }
    });
    TableName tableName = org.apache.hadoop.hbase.TableName.valueOf("schema1", "table1");
    boolean result = connection.scanTable(tableName, mock(Scan.class), (ResultScanner scanner) -> scanner != null);
    assertTrue(result);
    assertTrue(connection.isHealthy());
    assertEquals(1, connection.getRetries());
    verify(mockConnection, atLeastOnce()).getTable(anyObject());
    verify(mockTable, atLeastOnce()).getScanner(any(Scan.class));
    logger.info("scanTableWithRetry: exit");
}

19 Source : HBaseConnectionTest.java
with Apache License 2.0
from awslabs

@Test
public void scanTableRetriesExhausted() throws IOException {
    logger.info("scanTableRetriesExhausted: enter");
    when(mockConnection.getTable(any(org.apache.hadoop.hbase.TableName.class))).thenReturn(mockTable);
    when(mockTable.getScanner(any(Scan.class))).thenThrow(new RuntimeException("Retryable"));
    TableName tableName = org.apache.hadoop.hbase.TableName.valueOf("schema1", "table1");
    try {
        connection.scanTable(tableName, mock(Scan.class), (ResultScanner scanner) -> scanner != null);
    } catch (RuntimeException ex) {
        logger.info("listTableNamesByNamespaceRetryExhausted: Encountered expected exception.", ex);
    }
    assertFalse(connection.isHealthy());
    assertEquals(3, connection.getRetries());
    verify(mockConnection, atLeastOnce()).getTable(anyObject());
    verify(mockTable, atLeastOnce()).getScanner(any(Scan.class));
    logger.info("scanTableRetriesExhausted: exit");
}

19 Source : HBaseConnection.java
with Apache License 2.0
from awslabs

/**
 * Used to perform a scan of the given table, scan, and resultProcessor.
 *
 * @param tableName The HBase table to scan.
 * @param scan The HBase scan (filters, etc...) to run.
 * @param resultProcessor The ResultProcessor that will be used to process the results of the scan.
 * @param <T> The return type of the ResultProcessor and of this method.
 * @return The result produced by the resultProcessor when it has completed processing all results of the scan.
 */
public <T> T scanTable(TableName tableName, Scan scan, ResultProcessor<T> resultProcessor) {
    return callWithReconnectAndRetry(() -> {
        try (Table table = connection.getTable(tableName);
            ResultScanner scanner = table.getScanner(scan)) {
            try {
                return resultProcessor.scan(scanner);
            } catch (RuntimeException ex) {
                throw new UnrecoverableException("Scanner threw exception - " + ex.getMessage(), ex);
            }
        }
    });
}

19 Source : HBCKActions.java
with Apache License 2.0
from apache

/**
 * Deletes the middle region of the given table from the meta table.
 * Removes the whole "info" column family.
 */
private void deleteRegionFromMeta(String tname) throws IOException, InterruptedException {
    TableName tn = TableName.valueOf(tname);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
        Table metaTable = connection.getTable(TableName.valueOf("hbase:meta"));
        List<RegionInfo> ris = HBCKMetaTableAccessor.getTableRegions(connection, tn);
        System.out.println(String.format("Current Regions of the table " + tn.getNameAsString() + " in Meta before deletion of the region are: " + ris));
        RegionInfo ri = ris.get(ris.size() / 2);
        System.out.println("Deleting Region " + ri.getRegionNamereplacedtring());
        byte[] key = HBCKMetaTableAccessor.getMetaKeyForRegion(ri);
        Delete delete = new Delete(key);
        delete.addFamily(Bytes.toBytes("info"));
        metaTable.delete(delete);
        Thread.sleep(500);
        ris = HBCKMetaTableAccessor.getTableRegions(connection, tn);
        System.out.println("Current Regions of the table " + tn.getNamereplacedtring() + " in Meta after deletion of the region are: " + ris);
    }
}

19 Source : HBaseUpdatablePageSource.java
with Apache License 2.0
from analysys

@Override
public void deleteRows(Block rowIds) {
    try (Connection conn = clientManager.createConnection();
        Table table = conn.getTable(TableName.valueOf(schemaName + ":" + tableName))) {
        List<Delete> deletes = new ArrayList<>();
        Delete delete;
        for (int i = 0; i < rowIds.getPositionCount(); i++) {
            int len = rowIds.getSliceLength(i);
            Slice slice = rowIds.getSlice(i, 0, len);
            delete = new Delete(slice.getBytes());
            deletes.add(delete);
        }
        if (deletes.size() > 0)
            table.delete(deletes);
    } catch (Exception e) {
        log.error(e.getMessage(), e);
    }
}

18 Source : HbaseImpl.java
with MIT License
from heisedebaise

private Table getTable(String tableName) throws IOException {
    Connection connection = this.connection.get();
    if (connection == null) {
        connection = ConnectionFactory.createConnection(configuration);
        this.connection.set(connection);
    }
    return connection.getTable(TableName.valueOf(tableName));
}

18 Source : HbaseDB.java
with MIT License
from fengdis

/**
 * getGid
 * @param row
 * @throws Exception
 */
public static long getGid(String row) throws Exception {
    Table table_gid = connection.getTable(TableName.valueOf("gid"));
    // HTable table_gid = new HTable(TableName.valueOf("gid"), connection);
    long id = table_gid.incrementColumnValue(Bytes.toBytes(row), Bytes.toBytes("gid"), Bytes.toBytes(row), 1);
    table_gid.close();
    return id;
}

18 Source : TestLoadIncrementalHFilesSplitRecovery.java
with Apache License 2.0
from fengchen8086

/**
 * Populate table with known values.
 */
private void populateTable(final Connection connection, TableName table, int value) throws Exception {
    // create HFiles for different column families
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration());
    Path bulk1 = buildBulkFiles(table, value);
    try (Table t = connection.getTable(table)) {
        lih.doBulkLoad(bulk1, (HTable) t);
    }
}

18 Source : AccessControlClient.java
with Apache License 2.0
from fengchen8086

/**
 * Grant global permissions for the specified user.
 * @param connection The Connection instance to use
 */
public static void grant(final Connection connection, final String userName, final Permission.Action... actions) throws Throwable {
    PayloadCarryingRpcController controller = ((ClusterConnection) connection).getRpcControllerFactory().newController();
    try (Table table = connection.getTable(ACL_TABLE_NAME)) {
        ProtobufUtil.grant(controller, getAccessControlServiceStub(table), userName, actions);
    }
}

18 Source : AccessControlClient.java
with Apache License 2.0
from fengchen8086

/**
 * Revokes the permission on the namespace for the specified user.
 * @param connection The Connection instance to use
 * @param namespace
 * @param userName
 * @param actions
 * @throws Throwable
 */
public static void revoke(final Connection connection, final String namespace, final String userName, final Permission.Action... actions) throws Throwable {
    PayloadCarryingRpcController controller = ((ClusterConnection) connection).getRpcControllerFactory().newController();
    try (Table table = connection.getTable(ACL_TABLE_NAME)) {
        ProtobufUtil.revoke(controller, getAccessControlServiceStub(table), userName, namespace, actions);
    }
}

18 Source : AccessControlClient.java
with Apache License 2.0
from fengchen8086

/**
 * Revoke global permissions for the specified user.
 * @param connection The Connection instance to use
 */
public static void revoke(final Connection connection, final String userName, final Permission.Action... actions) throws Throwable {
    PayloadCarryingRpcController controller = ((ClusterConnection) connection).getRpcControllerFactory().newController();
    try (Table table = connection.getTable(ACL_TABLE_NAME)) {
        ProtobufUtil.revoke(controller, getAccessControlServiceStub(table), userName, actions);
    }
}

18 Source : AccessControlClient.java
with Apache License 2.0
from fengchen8086

/**
 * Grants permission on the specified namespace for the specified user.
 * @param connection The Connection instance to use
 * @param namespace
 * @param userName
 * @param actions
 * @throws Throwable
 */
public static void grant(final Connection connection, final String namespace, final String userName, final Permission.Action... actions) throws Throwable {
    PayloadCarryingRpcController controller = ((ClusterConnection) connection).getRpcControllerFactory().newController();
    try (Table table = connection.getTable(ACL_TABLE_NAME)) {
        ProtobufUtil.grant(controller, getAccessControlServiceStub(table), userName, namespace, actions);
    }
}

18 Source : HBCKMetaTableAccessor.java
with Apache License 2.0
from apache

/**
 * Converts and adds the passed <code>RegionInfo</code> parameter into a valid 'info:regioninfo'
 * cell value in 'hbase:meta'.
 * @param conn a valid, open connection.
 * @param region the region to be inserted in meta.
 * @throws IOException on any issues related with scanning meta table
 */
public static void addRegionToMeta(Connection conn, RegionInfo region) throws IOException {
    Put put = makePutFromRegionInfo(region, System.currentTimeMillis());
    addRegionStateToPut(put, RegionState.State.CLOSED);
    conn.getTable(TableName.META_TABLE_NAME).put(put);
}

17 Source : HbaseTest2.java
with Apache License 2.0
from morcble

// Scan multiple rows
public static void scanData(String tableName, String startRow, String stopRow) throws IOException {
    init();
    Table table = connection.getTable(TableName.valueOf(tableName));
    Scan scan = new Scan();
    // scan.setStartRow(Bytes.toBytes(startRow));
    // scan.setStopRow(Bytes.toBytes(stopRow));
    ResultScanner resultScanner = table.getScanner(scan);
    for (Result result : resultScanner) {
        showCell(result);
    }
    table.close();
    close();
}

17 Source : HbaseTest2.java
with Apache License 2.0
from morcble

// Insert a row
public static void insterRow(String tableName, String rowkey, String colFamily, String col, String val) throws IOException {
    init();
    Table table = connection.getTable(TableName.valueOf(tableName));
    Put put = new Put(Bytes.toBytes(rowkey));
    put.addColumn(Bytes.toBytes(colFamily), Bytes.toBytes(col), Bytes.toBytes(val));
    table.put(put);
    // Batch insert
    /*
     * List<Put> putList = new ArrayList<Put>();
     * putList.add(put);
     * table.put(putList);
     */
    table.close();
    close();
}

17 Source : HbaseTest2.java
with Apache License 2.0
from morcble

// Get a row by rowkey
public static void getData(String tableName, String rowkey, String colFamily, String col) throws IOException {
    init();
    Table table = connection.getTable(TableName.valueOf(tableName));
    Get get = new Get(Bytes.toBytes(rowkey));
    // Fetch only the specified column family
    // get.addFamily(Bytes.toBytes(colFamily));
    // Fetch only the specified column
    // get.addColumn(Bytes.toBytes(colFamily), Bytes.toBytes(col));
    Result result = table.get(get);
    showCell(result);
    table.close();
    close();
}

17 Source : HBaseStorage.java
with Apache License 2.0
from ManbangGroup

private Table getTable(String namespace, String tableName) throws Exception {
    Table table = connection.getTable(TableName.valueOf(namespace, tableName));
    table.setOperationTimeout(TABLE_OPERATION_TIMEOUT);
    return table;
}

17 Source : GridTableHBaseBenchmark.java
with Apache License 2.0
from Kyligence

private static void testColumnScan(Connection conn, List<Pair<Integer, Integer>> colScans) throws IOException {
    Stats stats = new Stats("COLUMN_SCAN");
    Table table = conn.getTable(TableName.valueOf(TEST_TABLE));
    try {
        stats.markStart();
        int nLogicCols = colScans.size();
        int nLogicRows = colScans.get(0).getSecond() - colScans.get(0).getFirst();
        Scan[] scans = new Scan[nLogicCols];
        ResultScanner[] scanners = new ResultScanner[nLogicCols];
        for (int i = 0; i < nLogicCols; i++) {
            scans[i] = new Scan();
            scans[i].addFamily(CF);
            scanners[i] = table.getScanner(scans[i]);
        }
        for (int i = 0; i < nLogicRows; i++) {
            for (int c = 0; c < nLogicCols; c++) {
                Result r = scanners[c].next();
                stats.consume(r);
            }
            dot(i, nLogicRows);
        }
        stats.markEnd();
    } finally {
        IOUtils.closeQuietly(table);
    }
}
