Here are examples of the Java API org.apache.hadoop.hbase.HTableDescriptor, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
697 Examples
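Before the examples below, here is a minimal, self-contained sketch of the most common HTableDescriptor workflow: describing a table with a column family and creating it through Admin. This is an illustrative sketch only, assuming an HBase 1.x client on the classpath and a reachable cluster; the table name "example" and family "cf" are placeholders.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateTableSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Describe a table named "example" with a single column family "cf".
            HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example"));
            desc.addFamily(new HColumnDescriptor("cf"));
            // Create the table only if it does not already exist.
            if (!admin.tableExists(desc.getTableName())) {
                admin.createTable(desc);
            }
        }
    }
}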
19
Source : DrillHBaseTable.java
with Apache License 2.0
from zpochen
public class DrillHBaseTable extends DrillTable implements DrillHBaseConstants {
private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillHBaseTable.class);
private HTableDescriptor tableDesc;
public DrillHBaseTable(String storageEngineName, HBaseStoragePlugin plugin, HBaseScanSpec scanSpec) {
super(storageEngineName, plugin, scanSpec);
try (Admin admin = plugin.getConnection().getAdmin()) {
tableDesc = admin.getTableDescriptor(TableName.valueOf(scanSpec.getTableName()));
} catch (IOException e) {
throw UserException.dataReadError().message("Failure while loading table %s in database %s.", scanSpec.getTableName(), storageEngineName).addContext("Message: ", e.getMessage()).build(logger);
}
}
@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
ArrayList<RelDataType> typeList = new ArrayList<>();
ArrayList<String> fieldNameList = new ArrayList<>();
fieldNameList.add(ROW_KEY);
typeList.add(typeFactory.createSqlType(SqlTypeName.ANY));
Set<byte[]> families = tableDesc.getFamiliesKeys();
for (byte[] family : families) {
fieldNameList.add(Bytes.toString(family));
typeList.add(typeFactory.createMapType(typeFactory.createSqlType(SqlTypeName.VARCHAR), typeFactory.createSqlType(SqlTypeName.ANY)));
}
return typeFactory.createStructType(typeList, fieldNameList);
}
}
19
Source : BinaryTableGroupScan.java
with Apache License 2.0
from zpochen
@JsonTypeName("maprdb-binary-scan")
public class BinaryTableGroupScan extends MapRDBGroupScan implements DrillHBaseConstants {
static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(BinaryTableGroupScan.class);
public static final String TABLE_BINARY = "binary";
private HBaseScanSpec hbaseScanSpec;
private HTableDescriptor hTableDesc;
private MapRDBTableStats tableStats;
@JsonCreator
public BinaryTableGroupScan(@JsonProperty("userName") final String userName, @JsonProperty("hbaseScanSpec") HBaseScanSpec scanSpec, @JsonProperty("storage") FileSystemConfig storagePluginConfig, @JsonProperty("format") MapRDBFormatPluginConfig formatPluginConfig, @JsonProperty("columns") List<SchemaPath> columns, @JacksonInject StoragePluginRegistry pluginRegistry) throws IOException, ExecutionSetupException {
this(userName, (FileSystemPlugin) pluginRegistry.getPlugin(storagePluginConfig), (MapRDBFormatPlugin) pluginRegistry.getFormatPlugin(storagePluginConfig, formatPluginConfig), scanSpec, columns);
}
public BinaryTableGroupScan(String userName, FileSystemPlugin storagePlugin, MapRDBFormatPlugin formatPlugin, HBaseScanSpec scanSpec, List<SchemaPath> columns) {
super(storagePlugin, formatPlugin, columns, userName);
this.hbaseScanSpec = scanSpec;
init();
}
public BinaryTableGroupScan(String userName, FileSystemPlugin storagePlugin, MapRDBFormatPlugin formatPlugin, HBaseScanSpec scanSpec, List<SchemaPath> columns, MapRDBTableStats tableStats) {
super(storagePlugin, formatPlugin, columns, userName);
this.hbaseScanSpec = scanSpec;
this.tableStats = tableStats;
init();
}
/**
* Private constructor, used for cloning.
* @param that The HBaseGroupScan to clone
*/
private BinaryTableGroupScan(BinaryTableGroupScan that) {
super(that);
this.hbaseScanSpec = that.hbaseScanSpec;
this.endpointFragmentMapping = that.endpointFragmentMapping;
this.hTableDesc = that.hTableDesc;
this.tableStats = that.tableStats;
}
@Override
public GroupScan clone(List<SchemaPath> columns) {
BinaryTableGroupScan newScan = new BinaryTableGroupScan(this);
newScan.columns = columns;
newScan.verifyColumns();
return newScan;
}
private void init() {
logger.debug("Getting region locations");
TableName tableName = TableName.valueOf(hbaseScanSpec.getTableName());
try (Admin admin = formatPlugin.getConnection().getAdmin();
RegionLocator locator = formatPlugin.getConnection().getRegionLocator(tableName)) {
hTableDesc = admin.getTableDescriptor(tableName);
// Fetch tableStats only once and cache it.
if (tableStats == null) {
tableStats = new MapRDBTableStats(getHBaseConf(), hbaseScanSpec.getTableName());
}
boolean foundStartRegion = false;
regionsToScan = new TreeMap<>();
List<HRegionLocation> regionLocations = locator.getAllRegionLocations();
for (HRegionLocation regionLocation : regionLocations) {
HRegionInfo regionInfo = regionLocation.getRegionInfo();
if (!foundStartRegion && hbaseScanSpec.getStartRow() != null && hbaseScanSpec.getStartRow().length != 0 && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
continue;
}
foundStartRegion = true;
regionsToScan.put(new TabletFragmentInfo(regionInfo), regionLocation.getHostname());
if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0 && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
break;
}
}
} catch (Exception e) {
throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
}
verifyColumns();
}
private void verifyColumns() {
/*
if (columns != null) {
for (SchemaPath column : columns) {
if (!(column.equals(ROW_KEY_PATH) || hTableDesc.hasFamily(HBaseUtils.getBytes(column.getRootSegment().getPath())))) {
DrillRuntimeException.format("The column family '%s' does not exist in HBase table: %s .",
column.getRootSegment().getPath(), hTableDesc.getNameAsString());
}
}
}
*/
}
protected MapRDBSubScanSpec getSubScanSpec(TabletFragmentInfo tfi) {
HBaseScanSpec spec = hbaseScanSpec;
MapRDBSubScanSpec subScanSpec = new MapRDBSubScanSpec(spec.getTableName(), regionsToScan.get(tfi), (!isNullOrEmpty(spec.getStartRow()) && tfi.containsRow(spec.getStartRow())) ? spec.getStartRow() : tfi.getStartKey(), (!isNullOrEmpty(spec.getStopRow()) && tfi.containsRow(spec.getStopRow())) ? spec.getStopRow() : tfi.getEndKey(), spec.getSerializedFilter(), null);
return subScanSpec;
}
@Override
public MapRDBSubScan getSpecificScan(int minorFragmentId) {
assert minorFragmentId < endpointFragmentMapping.size() : String.format("Mappings length [%d] should be greater than minor fragment id [%d] but it isn't.", endpointFragmentMapping.size(), minorFragmentId);
return new MapRDBSubScan(getUserName(), formatPlugin, endpointFragmentMapping.get(minorFragmentId), columns, TABLE_BINARY);
}
@Override
public ScanStats getScanStats() {
// TODO: look at stats for this.
long rowCount = (long) ((hbaseScanSpec.getFilter() != null ? .5 : 1) * tableStats.getNumRows());
int avgColumnSize = 10;
int numColumns = (columns == null || columns.isEmpty()) ? 100 : columns.size();
return new ScanStats(GroupScanProperty.NO_EXACT_ROW_COUNT, rowCount, 1, avgColumnSize * numColumns * rowCount);
}
@Override
@JsonIgnore
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
Preconditions.checkArgument(children.isEmpty());
return new BinaryTableGroupScan(this);
}
@JsonIgnore
public Configuration getHBaseConf() {
return getFormatPlugin().getHBaseConf();
}
@JsonIgnore
public String getTableName() {
return getHBaseScanSpec().getTableName();
}
@JsonIgnore
public MapRDBTableStats getTableStats() {
return tableStats;
}
@Override
public String toString() {
return "BinaryTableGroupScan [ScanSpec=" + hbaseScanSpec + ", columns=" + columns + "]";
}
@JsonProperty
public HBaseScanSpec getHBaseScanSpec() {
return hbaseScanSpec;
}
}
19
Source : HTableOperatorImpl.java
with Apache License 2.0
from ucarGroup
private void addFamily(List<ColumnDescriptor> listColumn, HTableDescriptor tableDescriptor) {
for (ColumnDescriptor cd : listColumn) {
tableDescriptor.addFamily(changeCd(cd));
}
}
19
Source : TestHBaseConnection.java
with Apache License 2.0
from openlookeng
/**
* listTableDescriptorsByNamespace
*/
public static HTableDescriptor[] listTableDescriptorsByNamespace(final String schema) throws IOException {
if ("hbase".equals(schema)) {
HTableDescriptor[] tables = new HTableDescriptor[3];
for (int iNum = 0; iNum < 3; iNum++) {
if (iNum != 0) {
tables[iNum] = new HTableDescriptor(TableName.valueOf("hbase.test_table" + iNum));
} else {
tables[iNum] = new HTableDescriptor(TableName.valueOf("hbase.test_table"));
}
tables[iNum].addFamily(new HColumnDescriptor("name"));
tables[iNum].addFamily(new HColumnDescriptor("age"));
tables[iNum].addFamily(new HColumnDescriptor("gender"));
tables[iNum].addFamily(new HColumnDescriptor("t"));
}
return tables;
} else {
return new HTableDescriptor[0];
}
}
19
Source : DeployCoprocessorCLI.java
with Apache License 2.0
from Kyligence
public static void addCoprocessorOnHTable(HTableDescriptor desc, Path hdfsCoprocessorJar) throws IOException {
logger.info("Add coprocessor on " + desc.getNamereplacedtring());
desc.addCoprocessor(CubeEndpointClreplaced, hdfsCoprocessorJar, 1001, null);
}
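As a usage note for the snippet above: HTableDescriptor.addCoprocessor takes a coprocessor class name, the jar that contains it, a load priority, and an optional configuration map. The sketch below mirrors that call; the class name and HDFS jar path are hypothetical placeholders, not part of the original example.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class AddCoprocessorSketch {
    static HTableDescriptor describeWithCoprocessor() throws IOException {
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example"));
        // Attach a hypothetical endpoint class shipped in an HDFS jar,
        // using the same priority value (1001) as the example above.
        desc.addCoprocessor("com.example.MyEndpoint",
                new Path("hdfs:///coprocessors/my-endpoint.jar"),
                1001, null);
        return desc;
    }
}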
19
Source : DeployCoprocessorCLI.java
with Apache License 2.0
from Kyligence
public static void deployCoprocessor(HTableDescriptor tableDesc) {
try {
initHTableCoprocessor(tableDesc);
logger.info("hbase table " + tableDesc.getTableName() + " deployed with coprocessor.");
} catch (Exception ex) {
logger.error("Error deploying coprocessor on " + tableDesc.getTableName(), ex);
logger.error("Will try creating the table without coprocessor.");
}
}
19
Source : HFileOutputFormat3.java
with Apache License 2.0
from Kyligence
/**
* Configure a MapReduce Job to perform an incremental load into the given
* table. This
* <ul>
* <li>Inspects the table to configure a total order partitioner</li>
* <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
* <li>Sets the number of reduce tasks to match the current number of regions</li>
* <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
* <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
* PutSortReducer)</li>
* </ul>
* The user should be sure to set the map output value class to either KeyValue or Put before
* running this function.
*/
public static void configureIncrementalLoad(Job job, HTableDescriptor tableDescriptor, RegionLocator regionLocator) throws IOException {
configureIncrementalLoad(job, tableDescriptor, regionLocator, HFileOutputFormat3.class);
}
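A minimal sketch of the calling side, per the javadoc above, assuming HBase 1.x client APIs and the HFileOutputFormat3 class from this example (its import depends on the project, so it is omitted here): the job's map output key/value classes are set to ImmutableBytesWritable and Put before configureIncrementalLoad wires up the partitioner, reducer count, and output format. The connection and table name are placeholders.
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.Job;

public class BulkLoadJobSketch {
    static void configureBulkLoadJob(Job job, Connection connection) throws IOException {
        TableName tableName = TableName.valueOf("example");
        try (Table table = connection.getTable(tableName);
             RegionLocator locator = connection.getRegionLocator(tableName)) {
            // The mapper is expected to emit (row key, Put) pairs.
            job.setMapOutputKeyClass(ImmutableBytesWritable.class);
            job.setMapOutputValueClass(Put.class);
            // HFileOutputFormat3 is the class shown in this example.
            HFileOutputFormat3.configureIncrementalLoad(job, table.getTableDescriptor(), locator);
        }
    }
}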
19
Source : MockHTable.java
with Apache License 2.0
from Kyligence
/**
* {@inheritDoc}
*/
@Override
public HTableDescriptor getTableDescriptor() throws IOException {
HTableDescriptor table = new HTableDescriptor(tableName);
for (String columnFamily : columnFamilies) {
table.addFamily(new HColumnDescriptor(columnFamily));
}
return table;
}
19
Source : TestWALFactory.java
with Apache License 2.0
from fengchen8086
/**
* Tests that we can write out an edit, close, and then read it back in again.
* @throws IOException
*/
@Test
public void testEditAdd() throws IOException {
final int COL_COUNT = 10;
final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("tablename")).addFamily(new HColumnDescriptor("column"));
final byte[] row = Bytes.toBytes("row");
WAL.Reader reader = null;
try {
final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
// Write columns named 1, 2, 3, etc. and then values of single byte
// 1, 2, 3...
long timestamp = System.currentTimeMillis();
WALEdit cols = new WALEdit();
for (int i = 0; i < COL_COUNT; i++) {
cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), timestamp, new byte[] { (byte) (i + '0') }));
}
HRegionInfo info = new HRegionInfo(htd.getTableName(), row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
final WAL log = wals.getWAL(info.getEncodedNameAsBytes());
final long txid = log.append(htd, info, new WALKey(info.getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis(), mvcc), cols, true);
log.sync(txid);
log.startCacheFlush(info.getEncodedNameAsBytes(), htd.getFamiliesKeys());
log.completeCacheFlush(info.getEncodedNameAsBytes());
log.shutdown();
Path filename = DefaultWALProvider.getCurrentFileName(log);
// Now open a reader on the log and assert append worked.
reader = wals.createReader(fs, filename);
// Above we added all columns on a single row so we only read one
// entry in the below... thats why we have '1'.
for (int i = 0; i < 1; i++) {
WAL.Entry entry = reader.next(null);
if (entry == null)
break;
WALKey key = entry.getKey();
WALEdit val = entry.getEdit();
assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
assertTrue(htd.getTableName().equals(key.getTablename()));
Cell cell = val.getCells().get(0);
assertTrue(Bytes.equals(row, cell.getRow()));
assertEquals((byte) (i + '0'), cell.getValue()[0]);
System.out.println(key + " " + val);
}
} finally {
if (reader != null) {
reader.close();
}
}
}
19
Source : TestWALFactory.java
with Apache License 2.0
from fengchen8086
/**
* @throws IOException
*/
@Test
public void testAppend() throws IOException {
final int COL_COUNT = 10;
final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("tablename")).addFamily(new HColumnDescriptor("column"));
final byte[] row = Bytes.toBytes("row");
WAL.Reader reader = null;
final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
try {
// Write columns named 1, 2, 3, etc. and then values of single byte
// 1, 2, 3...
long timestamp = System.currentTimeMillis();
WALEdit cols = new WALEdit();
for (int i = 0; i < COL_COUNT; i++) {
cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), timestamp, new byte[] { (byte) (i + '0') }));
}
HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
final WAL log = wals.getWAL(hri.getEncodedNameAsBytes());
final long txid = log.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis(), mvcc), cols, true);
log.sync(txid);
log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
log.completeCacheFlush(hri.getEncodedNameAsBytes());
log.shutdown();
Path filename = DefaultWALProvider.getCurrentFileName(log);
// Now open a reader on the log and assert append worked.
reader = wals.createReader(fs, filename);
WAL.Entry entry = reader.next();
assertEquals(COL_COUNT, entry.getEdit().size());
int idx = 0;
for (Cell val : entry.getEdit().getCells()) {
assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName()));
assertTrue(htd.getTableName().equals(entry.getKey().getTablename()));
assertTrue(Bytes.equals(row, val.getRow()));
assertEquals((byte) (idx + '0'), val.getValue()[0]);
System.out.println(entry.getKey() + " " + val);
idx++;
}
} finally {
if (reader != null) {
reader.close();
}
}
}
19
Source : TestDefaultWALProvider.java
with Apache License 2.0
from fengchen8086
@Test
public void testLogCleaning() throws Exception {
LOG.info("testLogCleaning");
final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testLogCleaning")).addFamily(new HColumnDescriptor("row"));
final HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf("testLogCleaning2")).addFamily(new HColumnDescriptor("row"));
final Configuration localConf = new Configuration(conf);
localConf.set(WALFactory.WAL_PROVIDER, DefaultWALProvider.class.getName());
final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
final AtomicLong sequenceId = new AtomicLong(1);
try {
HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
HRegionInfo hri2 = new HRegionInfo(htd2.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
// we want to mix edits from regions, so pick our own identifier.
final WAL log = wals.getWAL(UNSPECIFIED_REGION);
// Add a single edit and make sure that rolling won't remove the file
// Before HBASE-3198 it used to delete it
addEdits(log, hri, htd, 1);
log.rollWriter();
assertEquals(1, DefaultWALProvider.getNumRolledLogFiles(log));
// See if there's anything wrong with more than 1 edit
addEdits(log, hri, htd, 2);
log.rollWriter();
assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(log));
// Now mix edits from 2 regions, still no flushing
addEdits(log, hri, htd, 1);
addEdits(log, hri2, htd2, 1);
addEdits(log, hri, htd, 1);
addEdits(log, hri2, htd2, 1);
log.rollWriter();
assertEquals(3, DefaultWALProvider.getNumRolledLogFiles(log));
// Flush the first region, we expect to see the first two files getting
// archived. We need to append something or writer won't be rolled.
addEdits(log, hri2, htd2, 1);
log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
log.completeCacheFlush(hri.getEncodedNameAsBytes());
log.rollWriter();
assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(log));
// Flush the second region, which removes all the remaining output files
// since the oldest was completely flushed and the two others only contain
// flush information
addEdits(log, hri2, htd2, 1);
log.startCacheFlush(hri2.getEncodedNameAsBytes(), htd2.getFamiliesKeys());
log.completeCacheFlush(hri2.getEncodedNameAsBytes());
log.rollWriter();
assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(log));
} finally {
if (wals != null) {
wals.close();
}
}
}
19
Source : FaultyFSLog.java
with Apache License 2.0
from fengchen8086
@Override
public long append(HTableDescriptor htd, HRegionInfo info, WALKey key, WALEdit edits, boolean inMemstore) throws IOException {
if (this.ft == FailureType.APPEND) {
throw new IOException("append");
}
return super.append(htd, info, key, edits, inMemstore);
}
19
Source : OfflineMetaRebuildTestCore.java
with Apache License 2.0
from fengchen8086
protected HTableDescriptor[] getTables(final Configuration configuration) throws IOException {
HTableDescriptor[] htbls = null;
try (Connection connection = ConnectionFactory.createConnection(configuration)) {
try (Admin admin = connection.getAdmin()) {
htbls = admin.listTables();
}
}
return htbls;
}
19
Source : OfflineMetaRebuildTestCore.java
with Apache License 2.0
from fengchen8086
private void dumpMeta(HTableDescriptor htd) throws IOException {
List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(htd.getTableName());
for (byte[] row : metaRows) {
LOG.info(Bytes.toString(row));
}
}
19
Source : TestRestoreSnapshotHelper.java
with Apache License 2.0
from fengchen8086
/**
* Execute the restore operation
* @param snapshotDir The snapshot directory to use as "restore source"
* @param sd The snapshot descriptor
* @param htdClone The HTableDescriptor of the table to restore/clone.
*/
public void testRestore(final Path snapshotDir, final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
FSUtils.logFileSystemState(fs, rootDir, LOG);
new FSTableDescriptors(conf).createTableDescriptor(htdClone);
RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
helper.restoreHdfsRegions();
LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
FSUtils.logFileSystemState(fs, rootDir, LOG);
}
19
Source : SecureTestUtil.java
with Apache License 2.0
from fengchen8086
public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd) throws Exception {
createTable(testUtil, testUtil.getHBaseAdmin(), htd);
}
19
Source : SecureTestUtil.java
with Apache License 2.0
from fengchen8086
public static void createTable(HBaseTestingUtility testUtil, Admin admin, HTableDescriptor htd) throws Exception {
createTable(testUtil, admin, htd, null);
}
19
Source : SecureTestUtil.java
with Apache License 2.0
from fengchen8086
public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd, byte[][] splitKeys) throws Exception {
createTable(testUtil, testUtil.getHBaseAdmin(), htd, splitKeys);
}
19
Source : TestRegionReplicaReplicationEndpoint.java
with Apache License 2.0
from fengchen8086
@Test(timeout = 240000)
public void testRegionReplicaReplicationPeerIsCreatedForModifyTable() throws Exception {
// modify a table by adding region replicas. Check whether the replication peer is created
// and replication started.
ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
String peerId = "region_replica_replication";
if (admin.getPeerConfig(peerId) != null) {
admin.removePeer(peerId);
}
HTableDescriptor htd = HTU.createTableDescriptor("testRegionReplicaReplicationPeerIsCreatedForModifyTable");
HTU.getHBaseAdmin().createTable(htd);
// assert that replication peer is not created yet
ReplicationPeerConfig peerConfig = admin.getPeerConfig(peerId);
assertNull(peerConfig);
HTU.getHBaseAdmin().disableTable(htd.getTableName());
htd.setRegionReplication(2);
HTU.getHBaseAdmin().modifyTable(htd.getTableName(), htd);
HTU.getHBaseAdmin().enableTable(htd.getTableName());
// assert peer configuration is correct
peerConfig = admin.getPeerConfig(peerId);
assertNotNull(peerConfig);
assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey(HTU.getConfiguration()));
assertEquals(peerConfig.getReplicationEndpointImpl(), RegionReplicaReplicationEndpoint.class.getName());
admin.close();
}
19
Source : TestRegionReplicaReplicationEndpoint.java
with Apache License 2.0
from fengchen8086
@Test
public void testRegionReplicaReplicationPeerIsCreated() throws IOException, ReplicationException {
// create a table with region replicas. Check whether the replication peer is created
// and replication started.
ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
String peerId = "region_replica_replication";
if (admin.getPeerConfig(peerId) != null) {
admin.removePeer(peerId);
}
HTableDescriptor htd = HTU.createTableDescriptor("testReplicationPeerIsCreated_no_region_replicas");
HTU.getHBaseAdmin().createTable(htd);
ReplicationPeerConfig peerConfig = admin.getPeerConfig(peerId);
assertNull(peerConfig);
htd = HTU.createTableDescriptor("testReplicationPeerIsCreated");
htd.setRegionReplication(2);
HTU.getHBaseAdmin().createTable(htd);
// assert peer configuration is correct
peerConfig = admin.getPeerConfig(peerId);
assertNotNull(peerConfig);
assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey(HTU.getConfiguration()));
assertEquals(peerConfig.getReplicationEndpointImpl(), RegionReplicaReplicationEndpoint.class.getName());
admin.close();
}
19
Source : TestFSHLog.java
with Apache License 2.0
from fengchen8086
@Test
public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException {
final String name = "testSyncRunnerIndexOverflow";
FSHLog log = new FSHLog(fs, FSUtils.getRootDir(conf), name, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null);
try {
Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler");
ringBufferEventHandlerField.setAccessible(true);
FSHLog.RingBufferEventHandler ringBufferEventHandler = (FSHLog.RingBufferEventHandler) ringBufferEventHandlerField.get(log);
Field syncRunnerIndexField = FSHLog.RingBufferEventHandler.class.getDeclaredField("syncRunnerIndex");
syncRunnerIndexField.setAccessible(true);
syncRunnerIndexField.set(ringBufferEventHandler, Integer.MAX_VALUE - 1);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
for (int i = 0; i < 10; i++) {
addEdits(log, hri, htd, 1, mvcc);
}
} finally {
log.close();
}
}
19
Source : TestPerColumnFamilyFlush.java
with Apache License 2.0
from fengchen8086
private void initHRegion(String callingMethod, Configuration conf) throws IOException {
HTableDescriptor htd = new HTableDescriptor(TABLENAME);
for (byte[] family : FAMILIES) {
htd.addFamily(new HColumnDescriptor(family));
}
HRegionInfo info = new HRegionInfo(TABLENAME, null, null, false);
Path path = new Path(DIR, callingMethod);
region = HRegion.createHRegion(info, path, conf, htd);
}
19
Source : TestMinorCompaction.java
with Apache License 2.0
from fengchen8086
/**
* Test minor compactions
*/
@Category(MediumTests.class)
public class TestMinorCompaction {
@Rule
public TestName name = new TestName();
private static final Log LOG = LogFactory.getLog(TestMinorCompaction.class.getName());
private static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU();
protected Configuration conf = UTIL.getConfiguration();
private Region r = null;
private HTableDescriptor htd = null;
private int compactionThreshold;
private byte[] firstRowBytes, secondRowBytes, thirdRowBytes;
final private byte[] col1, col2;
/**
* constructor
*/
public TestMinorCompaction() {
super();
// Set cache flush size to 1MB
conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
firstRowBytes = START_KEY_BYTES;
secondRowBytes = START_KEY_BYTES.clone();
// Increment the least significant character so we get to next row.
secondRowBytes[START_KEY_BYTES.length - 1]++;
thirdRowBytes = START_KEY_BYTES.clone();
thirdRowBytes[START_KEY_BYTES.length - 1] += 2;
col1 = Bytes.toBytes("column1");
col2 = Bytes.toBytes("column2");
}
@Before
public void setUp() throws Exception {
this.htd = UTIL.createTableDescriptor(name.getMethodName());
this.r = UTIL.createLocalHRegion(htd, null, null);
}
@After
public void tearDown() throws Exception {
WAL wal = ((HRegion) r).getWAL();
((HRegion) r).close();
wal.close();
}
@Test
public void testMinorCompactionWithDeleteRow() throws Exception {
Delete deleteRow = new Delete(secondRowBytes);
testMinorCompactionWithDelete(deleteRow);
}
@Test
public void testMinorCompactionWithDeleteColumn1() throws Exception {
Delete dc = new Delete(secondRowBytes);
/* delete all timestamps in the column */
dc.deleteColumns(fam2, col2);
testMinorCompactionWithDelete(dc);
}
@Test
public void testMinorCompactionWithDeleteColumn2() throws Exception {
Delete dc = new Delete(secondRowBytes);
dc.deleteColumn(fam2, col2);
/* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3.
* we only delete the latest version. One might expect to see only
* versions 1 and 2. HBase differs, and gives us 0, 1 and 2.
* This is okay as well. Since there was no compaction done before the
* delete, version 0 seems to stay on.
*/
testMinorCompactionWithDelete(dc, 3);
}
@Test
public void testMinorCompactionWithDeleteColumnFamily() throws Exception {
Delete deleteCF = new Delete(secondRowBytes);
deleteCF.deleteFamily(fam2);
testMinorCompactionWithDelete(deleteCF);
}
@Test
public void testMinorCompactionWithDeleteVersion1() throws Exception {
Delete deleteVersion = new Delete(secondRowBytes);
deleteVersion.deleteColumns(fam2, col2, 2);
/* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3.
* We delete versions 0 ... 2. So, we still have one remaining.
*/
testMinorCompactionWithDelete(deleteVersion, 1);
}
@Test
public void testMinorCompactionWithDeleteVersion2() throws Exception {
Delete deleteVersion = new Delete(secondRowBytes);
deleteVersion.deleteColumn(fam2, col2, 1);
/*
* the table has 4 versions: 0, 1, 2, and 3.
* We delete 1.
* Should have 3 remaining.
*/
testMinorCompactionWithDelete(deleteVersion, 3);
}
/*
* A helper function to test the minor compaction algorithm. We check that
* the delete markers are left behind. Takes delete as an argument, which
* can be any delete (row, column, column family etc), that essentially
* deletes row2 and column2. row1 and column1 should be undeleted
*/
private void testMinorCompactionWithDelete(Delete delete) throws Exception {
testMinorCompactionWithDelete(delete, 0);
}
private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete) throws Exception {
HRegionIncommon loader = new HRegionIncommon(r);
for (int i = 0; i < compactionThreshold + 1; i++) {
HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes, thirdRowBytes, i);
HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes, thirdRowBytes, i);
HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes, thirdRowBytes, i);
HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes, thirdRowBytes, i);
r.flush(true);
}
Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
assertEquals(compactionThreshold, result.size());
result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
assertEquals(compactionThreshold, result.size());
// Now add deletes to memstore and then flush it. That will put us over
// the compaction threshold of 3 store files. Compacting these store files
// should result in a compacted store file that has no references to the
// deleted row.
r.delete(delete);
// Make sure that we have only deleted family2 from secondRowBytes
result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
assertEquals(expectedResultsAfterDelete, result.size());
// but we still have firstrow
result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
assertEquals(compactionThreshold, result.size());
r.flush(true);
// should not change anything.
// Let us check again
// Make sure that we have only deleted family2 from secondRowBytes
result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
assertEquals(expectedResultsAfterDelete, result.size());
// but we still have firstrow
result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
assertEquals(compactionThreshold, result.size());
// do a compaction
Store store2 = r.getStore(fam2);
int numFiles1 = store2.getStorefiles().size();
// > 3
replacedertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold);
// = 3
((HStore) store2).compactRecentForTestingreplacedumingDefaultPolicy(compactionThreshold);
int numFiles2 = store2.getStorefiles().size();
// Check that we did compact
replacedertTrue("Number of store files should go down", numFiles1 > numFiles2);
// Check that it was a minor compaction.
replacedertTrue("Was not supposed to be a major compaction", numFiles2 > 1);
// Make sure that we have only deleted family2 from secondRowBytes
result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
assertEquals(expectedResultsAfterDelete, result.size());
// but we still have firstrow
result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
assertEquals(compactionThreshold, result.size());
}
}
19
Source : TestMajorCompaction.java
with Apache License 2.0
from fengchen8086
/**
* Test major compactions
*/
@Category(MediumTests.class)
public class TestMajorCompaction {
@Rule
public TestName name = new TestName();
private static final Log LOG = LogFactory.getLog(TestMajorCompaction.class.getName());
private static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU();
protected Configuration conf = UTIL.getConfiguration();
private Region r = null;
private HTableDescriptor htd = null;
private static final byte[] COLUMN_FAMILY = fam1;
private final byte[] STARTROW = Bytes.toBytes(START_KEY);
private static final byte[] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
private int compactionThreshold;
private byte[] secondRowBytes, thirdRowBytes;
private static final long MAX_FILES_TO_COMPACT = 10;
/**
* constructor
*/
public TestMajorCompaction() {
super();
// Set cache flush size to 1MB
conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
secondRowBytes = START_KEY_BYTES.clone();
// Increment the least significant character so we get to next row.
secondRowBytes[START_KEY_BYTES.length - 1]++;
thirdRowBytes = START_KEY_BYTES.clone();
thirdRowBytes[START_KEY_BYTES.length - 1] += 2;
}
@Before
public void setUp() throws Exception {
this.htd = UTIL.createTableDescriptor(name.getMethodName());
this.r = UTIL.createLocalHRegion(htd, null, null);
}
@After
public void tearDown() throws Exception {
WAL wal = ((HRegion) r).getWAL();
((HRegion) r).close();
wal.close();
}
/**
* Test that on a major compaction, if all cells are expired or deleted, then
* we'll end up with no product. Make sure scanner over region returns
* right answer in this case - and that it just basically works.
* @throws IOException
*/
@Test
public void testMajorCompactingToNoOutput() throws IOException {
createStoreFile(r);
for (int i = 0; i < compactionThreshold; i++) {
createStoreFile(r);
}
// Now delete everything.
InternalScanner s = r.getScanner(new Scan());
do {
List<Cell> results = new ArrayList<Cell>();
boolean result = s.next(results);
r.delete(new Delete(CellUtil.cloneRow(results.get(0))));
if (!result)
break;
} while (true);
s.close();
// Flush
r.flush(true);
// Major compact.
r.compact(true);
s = r.getScanner(new Scan());
int counter = 0;
do {
List<Cell> results = new ArrayList<Cell>();
boolean result = s.next(results);
if (!result)
break;
counter++;
} while (true);
assertEquals(0, counter);
}
/**
* Run compaction and flushing memstore
* Assert deletes get cleaned up.
* @throws Exception
*/
@Test
public void testMajorCompaction() throws Exception {
majorCompaction();
}
@Test
public void testDataBlockEncodingInCacheOnly() throws Exception {
majorCompactionWithDataBlockEncoding(true);
}
@Test
public void testDataBlockEncodingEverywhere() throws Exception {
majorCompactionWithDataBlockEncoding(false);
}
public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly) throws Exception {
Map<Store, HFileDataBlockEncoder> replaceBlockCache = new HashMap<Store, HFileDataBlockEncoder>();
for (Store store : r.getStores()) {
HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
replaceBlockCache.put(store, blockEncoder);
final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE : inCache;
((HStore) store).setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
}
majorCompaction();
// restore settings
for (Entry<Store, HFileDataBlockEncoder> entry : replaceBlockCache.entrySet()) {
((HStore) entry.getKey()).setDataBlockEncoderInTest(entry.getValue());
}
}
private void majorCompaction() throws Exception {
createStoreFile(r);
for (int i = 0; i < compactionThreshold; i++) {
createStoreFile(r);
}
// Add more content.
HBaseTestCase.addContent(new HRegionIncommon(r), Bytes.toString(COLUMN_FAMILY));
// Now there are about 5 versions of each column.
// Default is that there only 3 (MAXVERSIONS) versions allowed per column.
//
// Assert == 3 when we ask for versions.
Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
assertEquals(compactionThreshold, result.size());
// see if CompactionProgress is in place but null
for (Store store : r.getStores()) {
assertNull(store.getCompactionProgress());
}
r.flush(true);
r.compact(true);
// see if CompactionProgress has done its thing on at least one store
int storeCount = 0;
for (Store store : r.getStores()) {
CompactionProgress progress = store.getCompactionProgress();
if (progress != null) {
++storeCount;
assertTrue(progress.currentCompactedKVs > 0);
assertTrue(progress.totalCompactingKVs > 0);
}
assertTrue(storeCount > 0);
}
// look at the second row
// Increment the least significant character so we get to next row.
byte[] secondRowBytes = START_KEY_BYTES.clone();
secondRowBytes[START_KEY_BYTES.length - 1]++;
// Always 3 versions if that is what max versions is.
result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
LOG.debug("Row " + Bytes.toStringBinary(secondRowBytes) + " after " + "initial compaction: " + result);
replacedertEquals("Invalid number of versions of row " + Bytes.toStringBinary(secondRowBytes) + ".", compactionThreshold, result.size());
// Now add deletes to memstore and then flush it.
// That will put us over
// the compaction threshold of 3 store files. Compacting these store files
// should result in a compacted store file that has no references to the
// deleted row.
LOG.debug("Adding deletes to memstore and flushing");
Delete delete = new Delete(secondRowBytes, System.currentTimeMillis());
byte[][] famAndQf = { COLUMN_FAMILY, null };
delete.deleteFamily(famAndQf[0]);
r.delete(delete);
// Assert deleted.
result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
assertTrue("Second row should have been deleted", result.isEmpty());
r.flush(true);
result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
replacedertTrue("Second row should have been deleted", result.isEmpty());
// Add a bit of data and flush. Start adding at 'bbb'.
createSmallerStoreFile(this.r);
r.flush(true);
// Assert that the second row is still deleted.
result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
assertTrue("Second row should still be deleted", result.isEmpty());
// Force major compaction.
r.compact(true);
assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 1);
result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
assertTrue("Second row should still be deleted", result.isEmpty());
// Make sure the store files do have some 'aaa' keys in them -- exactly 3.
// Also, that compacted store files do not have any secondRowBytes because
// they were deleted.
verifyCounts(3, 0);
// Multiple versions allowed for an entry, so the delete isn't enough
// Lower TTL and expire to ensure that all our entries have been wiped
final int ttl = 1000;
for (Store hstore : r.getStores()) {
HStore store = ((HStore) hstore);
ScanInfo old = store.getScanInfo();
ScanInfo si = new ScanInfo(old.getConfiguration(), old.getFamily(), old.getMinVersions(), old.getMaxVersions(), ttl, old.getKeepDeletedCells(), 0, old.getComparator());
store.setScanInfo(si);
}
Thread.sleep(1000);
r.compact(true);
int count = count();
replacedertEquals("Should not see anything after TTL has expired", 0, count);
}
@Test
public void testTimeBasedMajorCompaction() throws Exception {
// create 2 storefiles and force a major compaction to reset the time
// 10 sec
int delay = 10 * 1000;
// 20%
float jitterPct = 0.20f;
conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);
HStore s = ((HStore) r.getStore(COLUMN_FAMILY));
s.storeEngine.getCompactionPolicy().setConf(conf);
try {
createStoreFile(r);
createStoreFile(r);
r.compact(true);
// add one more file & verify that a regular compaction won't work
createStoreFile(r);
r.compact(false);
assertEquals(2, s.getStorefilesCount());
// ensure that major compaction time is deterministic
RatioBasedCompactionPolicy c = (RatioBasedCompactionPolicy) s.storeEngine.getCompactionPolicy();
Collection<StoreFile> storeFiles = s.getStorefiles();
long mcTime = c.getNextMajorCompactTime(storeFiles);
for (int i = 0; i < 10; ++i) {
assertEquals(mcTime, c.getNextMajorCompactTime(storeFiles));
}
// ensure that the major compaction time is within the variance
long jitter = Math.round(delay * jitterPct);
assertTrue(delay - jitter <= mcTime && mcTime <= delay + jitter);
// wait until the time-based compaction interval
Thread.sleep(mcTime);
// trigger a compaction request and ensure that it's upgraded to major
r.compact(false);
assertEquals(1, s.getStorefilesCount());
} finally {
// reset the timed compaction settings
conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000 * 60 * 60 * 24);
conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
// run a major to reset the cache
createStoreFile(r);
r.compact(true);
assertEquals(1, s.getStorefilesCount());
}
}
private void verifyCounts(int countRow1, int countRow2) throws Exception {
int count1 = 0;
int count2 = 0;
for (StoreFile f : r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) {
HFileScanner scanner = f.getReader().getScanner(false, false);
scanner.seekTo();
do {
byte[] row = scanner.getKeyValue().getRow();
if (Bytes.equals(row, STARTROW)) {
count1++;
} else if (Bytes.equals(row, secondRowBytes)) {
count2++;
}
} while (scanner.next());
}
assertEquals(countRow1, count1);
assertEquals(countRow2, count2);
}
private int count() throws IOException {
int count = 0;
for (StoreFile f : r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) {
HFileScanner scanner = f.getReader().getScanner(false, false);
if (!scanner.seekTo()) {
continue;
}
do {
count++;
} while (scanner.next());
}
return count;
}
private void createStoreFile(final Region region) throws IOException {
createStoreFile(region, Bytes.toString(COLUMN_FAMILY));
}
private void createStoreFile(final Region region, String family) throws IOException {
HRegionIncommon loader = new HRegionIncommon(region);
HBaseTestCase.addContent(loader, family);
loader.flushcache();
}
private void createSmallerStoreFile(final Region region) throws IOException {
HRegionIncommon loader = new HRegionIncommon(region);
HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), ("" + "bbb").getBytes(), null);
loader.flushcache();
}
/**
* Test for HBASE-5920 - Test user requested major compactions always occurring
*/
@Test
public void testNonUserMajorCompactionRequest() throws Exception {
Store store = r.getStore(COLUMN_FAMILY);
createStoreFile(r);
for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
createStoreFile(r);
}
store.triggerMajorCompaction();
CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY, null).getRequest();
replacedertNotNull("Expected to receive a compaction request", request);
replacedertEquals("System-requested major compaction should not occur if there are too many store files", false, request.isMajor());
}
/**
* Test for HBASE-5920
*/
@Test
public void testUserMajorCompactionRequest() throws IOException {
Store store = r.getStore(COLUMN_FAMILY);
createStoreFile(r);
for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
createStoreFile(r);
}
store.triggerMajorCompaction();
CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER, null).getRequest();
replacedertNotNull("Expected to receive a compaction request", request);
replacedertEquals("User-requested major compaction should always occur, even if there are too many store files", true, request.isMajor());
}
/**
* Test that on a major compaction, if all cells are expired or deleted, then we'll end up with no
* product. Make sure scanner over region returns right answer in this case - and that it just
* basically works.
* @throws IOException
*/
public void testMajorCompactingToNoOutputWithReverseScan() throws IOException {
createStoreFile(r);
for (int i = 0; i < compactionThreshold; i++) {
createStoreFile(r);
}
// Now delete everything.
Scan scan = new Scan();
scan.setReversed(true);
InternalScanner s = r.getScanner(scan);
do {
List<Cell> results = new ArrayList<Cell>();
boolean result = s.next(results);
assertTrue(!results.isEmpty());
r.delete(new Delete(results.get(0).getRow()));
if (!result)
break;
} while (true);
s.close();
// Flush
r.flush(true);
// Major compact.
r.compact(true);
scan = new Scan();
scan.setReversed(true);
s = r.getScanner(scan);
int counter = 0;
do {
List<Cell> results = new ArrayList<Cell>();
boolean result = s.next(results);
if (!result)
break;
counter++;
} while (true);
s.close();
assertEquals(0, counter);
}
}
19
Source : TestHRegionInfo.java
with Apache License 2.0
from fengchen8086
@Test
public void testLastRegionCompare() {
HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("testtable"));
HRegionInfo hrip = new HRegionInfo(tableDesc.getTableName(), Bytes.toBytes("a"), new byte[0]);
HRegionInfo hric = new HRegionInfo(tableDesc.getTableName(), Bytes.toBytes("a"), Bytes.toBytes("b"));
assertTrue(hrip.compareTo(hric) > 0);
}
19
Source : TestEncryptionRandomKeying.java
with Apache License 2.0
from fengchen8086
@Category(MediumTests.class)
public class TestEncryptionRandomKeying {
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static Configuration conf = TEST_UTIL.getConfiguration();
private static HTableDescriptor htd;
private static List<Path> findStorefilePaths(TableName tableName) throws Exception {
List<Path> paths = new ArrayList<Path>();
for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(htd.getTableName())) {
for (Store store : region.getStores()) {
for (StoreFile storefile : store.getStorefiles()) {
paths.add(storefile.getPath());
}
}
}
return paths;
}
private static byte[] extractHFileKey(Path path) throws Exception {
HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path, new CacheConfig(conf), conf);
try {
reader.loadFileInfo();
Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
replacedertNotNull("Reader has a null crypto context", cryptoContext);
Key key = cryptoContext.getKey();
if (key == null) {
return null;
}
return key.getEncoded();
} finally {
reader.close();
}
}
@BeforeClass
public static void setUp() throws Exception {
conf.setInt("hfile.format.version", 3);
conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
// Create the table schema
// Specify an encryption algorithm without a key
htd = new HTableDescriptor(TableName.valueOf("default", "TestEncryptionRandomKeying"));
HColumnDescriptor hcd = new HColumnDescriptor("cf");
String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
hcd.setEncryptionType(algorithm);
htd.addFamily(hcd);
// Start the minicluster
TEST_UTIL.startMiniCluster(1);
// Create the test table
TEST_UTIL.getHBaseAdmin().createTable(htd);
TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
// Create a store file
Table table = new HTable(conf, htd.getTableName());
try {
table.put(new Put(Bytes.toBytes("testrow")).add(hcd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value")));
} finally {
table.close();
}
TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
}
@AfterClass
public static void tearDown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testRandomKeying() throws Exception {
// Verify we have store file(s) with a random key
final List<Path> initialPaths = findStorefilePaths(htd.getTableName());
assertTrue(initialPaths.size() > 0);
for (Path path : initialPaths) {
assertNotNull("Store file " + path + " is not encrypted", extractHFileKey(path));
}
}
}
19
Source : TestCompaction.java
with Apache License 2.0
from fengchen8086
/**
* Test compaction framework and common functions
*/
@Category(MediumTests.class)
public class TestCompaction {
@Rule
public TestName name = new TestName();
private static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
private static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU();
protected Configuration conf = UTIL.getConfiguration();
private HRegion r = null;
private HTableDescriptor htd = null;
private static final byte[] COLUMN_FAMILY = fam1;
private final byte[] STARTROW = Bytes.toBytes(START_KEY);
private static final byte[] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
private int compactionThreshold;
private byte[] secondRowBytes, thirdRowBytes;
private static final long MAX_FILES_TO_COMPACT = 10;
/**
* constructor
*/
public TestCompaction() {
super();
// Set cache flush size to 1MB
conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY, NoLimitCompactionThroughputController.class.getName());
compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
secondRowBytes = START_KEY_BYTES.clone();
// Increment the least significant character so we get to next row.
secondRowBytes[START_KEY_BYTES.length - 1]++;
thirdRowBytes = START_KEY_BYTES.clone();
thirdRowBytes[START_KEY_BYTES.length - 1] += 2;
}
@Before
public void setUp() throws Exception {
this.htd = UTIL.createTableDescriptor(name.getMethodName());
this.r = UTIL.createLocalHRegion(htd, null, null);
}
@After
public void tearDown() throws Exception {
WAL wal = r.getWAL();
this.r.close();
wal.close();
}
/**
* Verify that you can stop a long-running compaction (used during RS shutdown)
* @throws Exception
*/
@Test
public void testInterruptCompaction() throws Exception {
assertEquals(0, count());
// lower the polling interval for this test
int origWI = HStore.closeCheckInterval;
// 10 KB
HStore.closeCheckInterval = 10 * 1000;
try {
// Create a couple store files w/ 15KB (over 10KB interval)
int jmax = (int) Math.ceil(15.0 / compactionThreshold);
// 1 KB chunk
byte[] pad = new byte[1000];
for (int i = 0; i < compactionThreshold; i++) {
HRegionIncommon loader = new HRegionIncommon(r);
Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
p.setDurability(Durability.SKIP_WAL);
for (int j = 0; j < jmax; j++) {
p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
}
HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
loader.put(p);
loader.flushcache();
}
HRegion spyR = spy(r);
doAnswer(new Answer() {
public Object answer(InvocationOnMock invocation) throws Throwable {
r.writestate.writesEnabled = false;
return invocation.callRealMethod();
}
}).when(spyR).doRegionCompactionPrep();
// force a minor compaction, but not before requesting a stop
spyR.compactStores();
// ensure that the compaction stopped, all old files are intact,
Store s = r.stores.get(COLUMN_FAMILY);
assertEquals(compactionThreshold, s.getStorefilesCount());
assertTrue(s.getStorefilesSize() > 15 * 1000);
// and no new store files persisted past compactStores()
FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
assertEquals(0, ls.length);
} finally {
// don't mess up future tests
r.writestate.writesEnabled = true;
HStore.closeCheckInterval = origWI;
// Delete all Store information once done using
for (int i = 0; i < compactionThreshold; i++) {
Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
byte[][] famAndQf = { COLUMN_FAMILY, null };
delete.deleteFamily(famAndQf[0]);
r.delete(delete);
}
r.flush(true);
// Multiple versions allowed for an entry, so the delete isn't enough
// Lower TTL and expire to ensure that all our entries have been wiped
final int ttl = 1000;
for (Store hstore : this.r.stores.values()) {
HStore store = (HStore) hstore;
ScanInfo old = store.getScanInfo();
ScanInfo si = new ScanInfo(old.getConfiguration(), old.getFamily(), old.getMinVersions(), old.getMaxVersions(), ttl, old.getKeepDeletedCells(), 0, old.getComparator());
store.setScanInfo(si);
}
Thread.sleep(ttl);
r.compact(true);
assertEquals(0, count());
}
}
private int count() throws IOException {
int count = 0;
for (StoreFile f : this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
HFileScanner scanner = f.getReader().getScanner(false, false);
if (!scanner.seekTo()) {
continue;
}
do {
count++;
} while (scanner.next());
}
return count;
}
private void createStoreFile(final HRegion region) throws IOException {
createStoreFile(region, Bytes.toString(COLUMN_FAMILY));
}
private void createStoreFile(final HRegion region, String family) throws IOException {
HRegionIncommon loader = new HRegionIncommon(region);
HBaseTestCase.addContent(loader, family);
loader.flushcache();
}
@Test
public void testCompactionWithCorruptResult() throws Exception {
int nfiles = 10;
for (int i = 0; i < nfiles; i++) {
createStoreFile(r);
}
HStore store = (HStore) r.getStore(COLUMN_FAMILY);
Collection<StoreFile> storeFiles = store.getStorefiles();
DefaultCompactor tool = (DefaultCompactor) store.storeEngine.getCompactor();
tool.compactForTesting(storeFiles, false);
// Now lets corrupt the compacted file.
FileSystem fs = store.getFileSystem();
// default compaction policy created one and only one new compacted file
Path dstPath = store.getRegionFileSystem().createTempName();
FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short) 3, (long) 1024, null);
stream.writeChars("CORRUPT FILE!!!!");
stream.close();
Path origPath = store.getRegionFileSystem().commitStoreFile(Bytes.toString(COLUMN_FAMILY), dstPath);
try {
((HStore) store).moveFileIntoPlace(origPath);
} catch (Exception e) {
// The complete compaction should fail and the corrupt file should remain
// in the 'tmp' directory;
assert (fs.exists(origPath));
assert (!fs.exists(dstPath));
System.out.println("testCompactionWithCorruptResult Passed");
return;
}
fail("testCompactionWithCorruptResult failed since no exception was" + "thrown while completing a corrupt file");
}
/**
* Create a custom compaction request and be sure that we can track it through the queue, knowing
* when the compaction is completed.
*/
@Test
public void testTrackingCompactionRequest() throws Exception {
// setup a compact/split thread on a mock server
HRegionServer mockServer = Mockito.mock(HRegionServer.class);
Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
CompactSplitThread thread = new CompactSplitThread(mockServer);
Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
// setup a region/store with some files
Store store = r.getStore(COLUMN_FAMILY);
createStoreFile(r);
for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
createStoreFile(r);
}
CountDownLatch latch = new CountDownLatch(1);
TrackableCompactionRequest request = new TrackableCompactionRequest(latch);
thread.requestCompaction(r, store, "test custom comapction", Store.PRIORITY_USER, request, null);
// wait for the latch to complete.
latch.await();
thread.interruptIfNecessary();
}
/**
* HBASE-7947: Regression test to ensure adding to the correct list in the
* {@link CompactSplitThread}
* @throws Exception on failure
*/
@Test
public void testMultipleCustomCompactionRequests() throws Exception {
// setup a compact/split thread on a mock server
HRegionServer mockServer = Mockito.mock(HRegionServer.class);
Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
CompactSplitThread thread = new CompactSplitThread(mockServer);
Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
// setup a region/store with some files
int numStores = r.getStores().size();
List<Pair<CompactionRequest, Store>> requests = new ArrayList<Pair<CompactionRequest, Store>>(numStores);
CountDownLatch latch = new CountDownLatch(numStores);
// create some store files and setup requests for each store on which we want to do a
// compaction
for (Store store : r.getStores()) {
createStoreFile(r, store.getColumnFamilyName());
createStoreFile(r, store.getColumnFamilyName());
createStoreFile(r, store.getColumnFamilyName());
requests.add(new Pair<CompactionRequest, Store>(new TrackableCompactionRequest(latch), store));
}
thread.requestCompaction(r, "test mulitple custom comapctions", Store.PRIORITY_USER, Collections.unmodifiableList(requests), null);
// wait for the latch to complete.
latch.await();
thread.interruptIfNecessary();
}
private class StoreMockMaker extends StatefulStoreMockMaker {
public ArrayList<StoreFile> compacting = new ArrayList<StoreFile>();
public ArrayList<StoreFile> notCompacting = new ArrayList<StoreFile>();
private ArrayList<Integer> results;
public StoreMockMaker(ArrayList<Integer> results) {
this.results = results;
}
public class TestCompactionContext extends CompactionContext {
private List<StoreFile> selectedFiles;
public TestCompactionContext(List<StoreFile> selectedFiles) {
super();
this.selectedFiles = selectedFiles;
}
@Override
public List<StoreFile> preSelect(List<StoreFile> filesCompacting) {
return new ArrayList<StoreFile>();
}
@Override
public boolean select(List<StoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, boolean forceMajor) throws IOException {
this.request = new CompactionRequest(selectedFiles);
this.request.setPriority(getPriority());
return true;
}
@Override
public List<Path> compact(CompactionThroughputController throughputController) throws IOException {
return compact(throughputController, null);
}
@Override
public List<Path> compact(CompactionThroughputController throughputController, User user) throws IOException {
finishCompaction(this.selectedFiles);
return new ArrayList<Path>();
}
}
@Override
public synchronized CompactionContext selectCompaction() {
CompactionContext ctx = new TestCompactionContext(new ArrayList<StoreFile>(notCompacting));
compacting.addAll(notCompacting);
notCompacting.clear();
try {
ctx.select(null, false, false, false);
} catch (IOException ex) {
fail("Shouldn't happen");
}
return ctx;
}
@Override
public synchronized void cancelCompaction(Object object) {
TestCompactionContext ctx = (TestCompactionContext) object;
compacting.removeAll(ctx.selectedFiles);
notCompacting.addAll(ctx.selectedFiles);
}
public synchronized void finishCompaction(List<StoreFile> sfs) {
if (sfs.isEmpty())
return;
synchronized (results) {
results.add(sfs.size());
}
compacting.removeAll(sfs);
}
@Override
public int getPriority() {
return 7 - compacting.size() - notCompacting.size();
}
}
public class BlockingStoreMockMaker extends StatefulStoreMockMaker {
BlockingCompactionContext blocked = null;
public class BlockingCompactionContext extends CompactionContext {
public volatile boolean isInCompact = false;
public void unblock() {
synchronized (this) {
this.notifyAll();
}
}
@Override
public List<Path> compact(CompactionThroughputController throughputController) throws IOException {
return compact(throughputController, null);
}
@Override
public List<Path> compact(CompactionThroughputController throughputController, User user) throws IOException {
try {
isInCompact = true;
synchronized (this) {
this.wait();
}
} catch (InterruptedException e) {
Assume.assumeNoException(e);
}
return new ArrayList<Path>();
}
@Override
public List<StoreFile> preSelect(List<StoreFile> filesCompacting) {
return new ArrayList<StoreFile>();
}
@Override
public boolean select(List<StoreFile> f, boolean i, boolean m, boolean e) throws IOException {
this.request = new CompactionRequest(new ArrayList<StoreFile>());
return true;
}
}
@Override
public CompactionContext selectCompaction() {
this.blocked = new BlockingCompactionContext();
try {
this.blocked.select(null, false, false, false);
} catch (IOException ex) {
fail("Shouldn't happen");
}
return this.blocked;
}
@Override
public void cancelCompaction(Object object) {
}
public int getPriority() {
// some invalid value, see createStoreMock
return Integer.MIN_VALUE;
}
public BlockingCompactionContext waitForBlocking() {
while (this.blocked == null || !this.blocked.isInCompact) {
Threads.sleepWithoutInterrupt(50);
}
BlockingCompactionContext ctx = this.blocked;
this.blocked = null;
return ctx;
}
@Override
public Store createStoreMock(String name) throws Exception {
return createStoreMock(Integer.MIN_VALUE, name);
}
public Store createStoreMock(int priority, String name) throws Exception {
// Override the mock to always return the specified priority.
Store s = super.createStoreMock(name);
when(s.getCompactPriority()).thenReturn(priority);
return s;
}
}
/**
* Test compaction priority management and multiple compactions per store (HBASE-8665).
*/
@Test
public void testCompactionQueuePriorities() throws Exception {
// Setup a compact/split thread on a mock server.
final Configuration conf = HBaseConfiguration.create();
HRegionServer mockServer = mock(HRegionServer.class);
when(mockServer.isStopped()).thenReturn(false);
when(mockServer.getConfiguration()).thenReturn(conf);
when(mockServer.getChoreService()).thenReturn(new ChoreService("test"));
CompactSplitThread cst = new CompactSplitThread(mockServer);
when(mockServer.getCompactSplitThread()).thenReturn(cst);
// prevent large compaction thread pool stealing job from small compaction queue.
cst.shutdownLongCompactions();
// Set up the region mock that redirects compactions.
HRegion r = mock(HRegion.class);
when(r.compact(any(CompactionContext.class), any(Store.class), any(CompactionThroughputController.class), any(User.class))).then(new Answer<Boolean>() {
public Boolean answer(InvocationOnMock invocation) throws Throwable {
invocation.getArgumentAt(0, CompactionContext.class).compact(invocation.getArgumentAt(2, CompactionThroughputController.class));
return true;
}
});
// Set up store mocks for 2 "real" stores and the one we use for blocking CST.
ArrayList<Integer> results = new ArrayList<Integer>();
StoreMockMaker sm = new StoreMockMaker(results), sm2 = new StoreMockMaker(results);
Store store = sm.createStoreMock("store1"), store2 = sm2.createStoreMock("store2");
BlockingStoreMockMaker blocker = new BlockingStoreMockMaker();
// First, block the compaction thread so that we could muck with queue.
cst.requestSystemCompaction(r, blocker.createStoreMock(1, "b-pri1"), "b-pri1");
BlockingStoreMockMaker.BlockingCompactionContext currentBlock = blocker.waitForBlocking();
// Add 4 files to store1, 3 to store2, and queue compactions; pri 3 and 4 respectively.
for (int i = 0; i < 4; ++i) {
sm.notCompacting.add(createFile());
}
cst.requestSystemCompaction(r, store, "s1-pri3");
for (int i = 0; i < 3; ++i) {
sm2.notCompacting.add(createFile());
}
cst.requestSystemCompaction(r, store2, "s2-pri4");
// Now add 2 more files to store1 and queue compaction - pri 1.
for (int i = 0; i < 2; ++i) {
sm.notCompacting.add(createFile());
}
cst.requestSystemCompaction(r, store, "s1-pri1");
// Finally add blocking compaction with priority 2.
cst.requestSystemCompaction(r, blocker.createStoreMock(2, "b-pri2"), "b-pri2");
// Unblock the blocking compaction; we should run pri1 and become blocked again on pri2.
currentBlock.unblock();
currentBlock = blocker.waitForBlocking();
// Pri1 should have "compacted" all 6 files.
assertEquals(1, results.size());
assertEquals(6, results.get(0).intValue());
// Add 2 files to store 1 (it has 2 files now).
for (int i = 0; i < 2; ++i) {
sm.notCompacting.add(createFile());
}
// Now we have pri4 for store 2 in queue, and pri3 for store1; store1's current priority
// is 5, however, so it must not preempt store 2. Add blocking compaction at the end.
cst.requestSystemCompaction(r, blocker.createStoreMock(7, "b-pri7"), "b-pri7");
currentBlock.unblock();
currentBlock = blocker.waitForBlocking();
assertEquals(3, results.size());
// 3 files should go before 2 files.
assertEquals(3, results.get(1).intValue());
assertEquals(2, results.get(2).intValue());
currentBlock.unblock();
cst.interruptIfNecessary();
}
private static StoreFile createFile() throws Exception {
StoreFile sf = mock(StoreFile.class);
when(sf.getPath()).thenReturn(new Path("file"));
StoreFile.Reader r = mock(StoreFile.Reader.class);
when(r.length()).thenReturn(10L);
when(sf.getReader()).thenReturn(r);
return sf;
}
/**
* Simple {@link CompactionRequest} on which you can wait until the requested compaction finishes.
*/
public static class TrackableCompactionRequest extends CompactionRequest {
private CountDownLatch done;
/**
* Constructor for a custom compaction. Uses the setXXX methods to update the state of the
* compaction before being used.
*/
public TrackableCompactionRequest(CountDownLatch finished) {
super();
this.done = finished;
}
@Override
public void afterExecute() {
super.afterExecute();
this.done.countDown();
}
}
}
19
Source : TestBulkLoad.java
with Apache License 2.0
from fengchen8086
private HRegion testRegionWithFamiliesAndSpecifiedTableName(TableName tableName, byte[]... families) throws IOException {
HRegionInfo hRegionInfo = new HRegionInfo(tableName);
HTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);
for (byte[] family : families) {
hTableDescriptor.addFamily(new HColumnDescriptor(family));
}
// TODO We need a way to do this without creating files
return HRegion.createHRegion(hRegionInfo, new Path(testFolder.newFolder().toURI()), conf, hTableDescriptor, log);
}
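The snippet above follows the pattern used throughout these examples: construct an HTableDescriptor from a TableName, then add one HColumnDescriptor per column family before creating the region. A minimal, self-contained sketch of just the descriptor-building step (the table name "demo" and family "cf" are placeholder values, not taken from the test) might look like:
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
public class DescriptorSketch {
public static void main(String[] args) {
// Describe a hypothetical table with a single column family.
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
htd.addFamily(new HColumnDescriptor("cf"));
System.out.println(htd.getTableName() + " has " + htd.getFamilies().size() + " family");
}
}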
19
Source : TestCloseRegionHandler.java
with Apache License 2.0
from fengchen8086
/**
* Test of the {@link CloseRegionHandler}.
*/
@Category(MediumTests.class)
public class TestCloseRegionHandler {
static final Log LOG = LogFactory.getLog(TestCloseRegionHandler.class);
private final static HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU();
private static final HTableDescriptor TEST_HTD = new HTableDescriptor(TableName.valueOf("TestCloseRegionHandler"));
private HRegionInfo TEST_HRI;
private int testIndex = 0;
@BeforeClass
public static void before() throws Exception {
HTU.getConfiguration().setBoolean("hbase.assignment.usezk", true);
HTU.startMiniZKCluster();
}
@AfterClass
public static void after() throws IOException {
HTU.shutdownMiniZKCluster();
}
/**
* Before each test, use a different HRI, so the different tests
* don't interfere with each other. This allows us to use just
* a single ZK cluster for the whole suite.
*/
@Before
public void setupHRI() {
TEST_HRI = new HRegionInfo(TEST_HTD.getTableName(), Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1));
testIndex++;
}
/**
* Test that if we fail a flush, abort gets set on close.
* @see <a href="https://issues.apache.org/jira/browse/HBASE-4270">HBASE-4270</a>
* @throws IOException
* @throws NodeExistsException
* @throws KeeperException
*/
@Test
public void testFailedFlushAborts() throws IOException, NodeExistsException, KeeperException {
final Server server = new MockServer(HTU, false);
final RegionServerServices rss = HTU.createMockRegionServerService();
HTableDescriptor htd = TEST_HTD;
final HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW);
HRegion region = HTU.createLocalHRegion(hri, htd);
try {
assertNotNull(region);
// Spy on the region so can throw exception when close is called.
HRegion spy = Mockito.spy(region);
final boolean abort = false;
Mockito.when(spy.close(abort)).thenThrow(new IOException("Mocked failed close!"));
// The CloseRegionHandler will try to get an HRegion that corresponds
// to the passed hri -- so insert the region into the online region Set.
rss.addToOnlineRegions(spy);
// Assert the Server is NOT stopped before we call close region.
assertFalse(server.isStopped());
ZkCoordinatedStateManager consensusProvider = new ZkCoordinatedStateManager();
consensusProvider.initialize(server);
consensusProvider.start();
ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd = new ZkCloseRegionCoordination.ZkCloseRegionDetails();
zkCrd.setPublishStatusInZk(false);
zkCrd.setExpectedVersion(-1);
CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, consensusProvider.getCloseRegionCoordination(), zkCrd);
boolean throwable = false;
try {
handler.process();
} catch (Throwable t) {
throwable = true;
} finally {
assertTrue(throwable);
// Abort calls stop so stopped flag should be set.
assertTrue(server.isStopped());
}
} finally {
HRegion.closeHRegion(region);
}
}
/**
* Test if close region can handle ZK closing node version mismatch
* @throws IOException
* @throws NodeExistsException
* @throws KeeperException
* @throws DeserializationException
*/
@Test
public void testZKClosingNodeVersionMismatch() throws IOException, NodeExistsException, KeeperException, DeserializationException {
final Server server = new MockServer(HTU);
final RegionServerServices rss = HTU.createMockRegionServerService();
HTableDescriptor htd = TEST_HTD;
final HRegionInfo hri = TEST_HRI;
ZkCoordinatedStateManager coordinationProvider = new ZkCoordinatedStateManager();
coordinationProvider.initialize(server);
coordinationProvider.start();
// open a region first so that it can be closed later
OpenRegion(server, rss, htd, hri, coordinationProvider.getOpenRegionCoordination());
// close the region
// Create it CLOSING, which is what Master set before sending CLOSE RPC
int versionOfClosingNode = ZKAssign.createNodeClosing(server.getZooKeeper(), hri, server.getServerName());
// The CloseRegionHandler will validate the expected version
// Given it is set to the invalid versionOfClosingNode + 1,
// the znode should remain in M_ZK_REGION_CLOSING
ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd = new ZkCloseRegionCoordination.ZkCloseRegionDetails();
zkCrd.setPublishStatusInZk(true);
zkCrd.setExpectedVersion(versionOfClosingNode + 1);
CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, coordinationProvider.getCloseRegionCoordination(), zkCrd);
handler.process();
// Handler should remain in M_ZK_REGION_CLOSING
RegionTransition rt = RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName()));
assertTrue(rt.getEventType().equals(EventType.M_ZK_REGION_CLOSING));
}
/**
* Test if the region can be closed properly
* @throws IOException
* @throws NodeExistsException
* @throws KeeperException
* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
*/
@Test
public void testCloseRegion() throws IOException, NodeExistsException, KeeperException, DeserializationException {
final Server server = new MockServer(HTU);
final RegionServerServices rss = HTU.createMockRegionServerService();
HTableDescriptor htd = TEST_HTD;
HRegionInfo hri = TEST_HRI;
ZkCoordinatedStateManager coordinationProvider = new ZkCoordinatedStateManager();
coordinationProvider.initialize(server);
coordinationProvider.start();
// open a region first so that it can be closed later
OpenRegion(server, rss, htd, hri, coordinationProvider.getOpenRegionCoordination());
// close the region
// Create it CLOSING, which is what Master set before sending CLOSE RPC
int versionOfClosingNode = ZKAssign.createNodeClosing(server.getZooKeeper(), hri, server.getServerName());
// The CloseRegionHandler will validate the expected version
// Given it is set to the correct versionOfClosingNode,
// CloseRegionHandler should transition the znode to RS_ZK_REGION_CLOSED
ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd = new ZkCloseRegionCoordination.ZkCloseRegionDetails();
zkCrd.setPublishStatusInZk(true);
zkCrd.setExpectedVersion(versionOfClosingNode);
CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, coordinationProvider.getCloseRegionCoordination(), zkCrd);
handler.process();
// Handler should have transitioned it to RS_ZK_REGION_CLOSED
RegionTransition rt = RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName()));
assertTrue(rt.getEventType().equals(EventType.RS_ZK_REGION_CLOSED));
}
private void OpenRegion(Server server, RegionServerServices rss, HTableDescriptor htd, HRegionInfo hri, OpenRegionCoordination coordination) throws IOException, NodeExistsException, KeeperException, DeserializationException {
// Create it OFFLINE node, which is what Master set before sending OPEN RPC
ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
OpenRegionCoordination.OpenRegionDetails ord = coordination.getDetailsForNonCoordinatedOpening();
OpenRegionHandler openHandler = new OpenRegionHandler(server, rss, hri, htd, -1, coordination, ord);
rss.getRegionsInTransitionInRS().put(hri.getEncodedNameAsBytes(), Boolean.TRUE);
openHandler.process();
// This parse is not used?
RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName()));
// delete the node, which is what the Master does after the region is opened
ZKAssign.deleteNode(server.getZooKeeper(), hri.getEncodedName(), EventType.RS_ZK_REGION_OPENED, server.getServerName());
}
}
19
Source : TestCatalogJanitor.java
with Apache License 2.0
from fengchen8086
private HTableDescriptor createHTableDescriptor() {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
htd.addFamily(new HColumnDescriptor("f"));
return htd;
}
19
Source : TestCatalogJanitor.java
with Apache License 2.0
from fengchen8086
/**
* Test that {@link SplitParentFirstComparator} sorts parent regions before their daughter regions
*/
@Test
public void testSplitParentFirstComparator() {
SplitParentFirstComparator comp = new SplitParentFirstComparator();
final HTableDescriptor htd = createHTableDescriptor();
/* Region splits:
*
* rootRegion --- firstRegion --- firstRegiona
* | |- firstRegionb
* |
* |- lastRegion --- lastRegiona --- lastRegionaa
* | |- lastRegionab
* |- lastRegionb
*
* rootRegion : [] - []
* firstRegion : [] - bbb
* lastRegion : bbb - []
* firstRegiona : [] - aaa
* firstRegionb : aaa - bbb
* lastRegiona : bbb - ddd
* lastRegionb : ddd - []
*/
// root region
HRegionInfo rootRegion = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, true);
HRegionInfo firstRegion = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, Bytes.toBytes("bbb"), true);
HRegionInfo lastRegion = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"), HConstants.EMPTY_END_ROW, true);
assertTrue(comp.compare(rootRegion, rootRegion) == 0);
assertTrue(comp.compare(firstRegion, firstRegion) == 0);
assertTrue(comp.compare(lastRegion, lastRegion) == 0);
assertTrue(comp.compare(rootRegion, firstRegion) < 0);
assertTrue(comp.compare(rootRegion, lastRegion) < 0);
assertTrue(comp.compare(firstRegion, lastRegion) < 0);
// first region split into a, b
HRegionInfo firstRegiona = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, Bytes.toBytes("aaa"), true);
HRegionInfo firstRegionb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), true);
// last region split into a, b
HRegionInfo lastRegiona = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ddd"), true);
HRegionInfo lastRegionb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ddd"), HConstants.EMPTY_END_ROW, true);
assertTrue(comp.compare(firstRegiona, firstRegiona) == 0);
assertTrue(comp.compare(firstRegionb, firstRegionb) == 0);
assertTrue(comp.compare(rootRegion, firstRegiona) < 0);
assertTrue(comp.compare(rootRegion, firstRegionb) < 0);
assertTrue(comp.compare(firstRegion, firstRegiona) < 0);
assertTrue(comp.compare(firstRegion, firstRegionb) < 0);
assertTrue(comp.compare(firstRegiona, firstRegionb) < 0);
assertTrue(comp.compare(lastRegiona, lastRegiona) == 0);
assertTrue(comp.compare(lastRegionb, lastRegionb) == 0);
assertTrue(comp.compare(rootRegion, lastRegiona) < 0);
assertTrue(comp.compare(rootRegion, lastRegionb) < 0);
assertTrue(comp.compare(lastRegion, lastRegiona) < 0);
assertTrue(comp.compare(lastRegion, lastRegionb) < 0);
assertTrue(comp.compare(lastRegiona, lastRegionb) < 0);
assertTrue(comp.compare(firstRegiona, lastRegiona) < 0);
assertTrue(comp.compare(firstRegiona, lastRegionb) < 0);
assertTrue(comp.compare(firstRegionb, lastRegiona) < 0);
assertTrue(comp.compare(firstRegionb, lastRegionb) < 0);
HRegionInfo lastRegionaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), false);
HRegionInfo lastRegionab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), false);
assertTrue(comp.compare(lastRegiona, lastRegionaa) < 0);
assertTrue(comp.compare(lastRegiona, lastRegionab) < 0);
assertTrue(comp.compare(lastRegionaa, lastRegionab) < 0);
}
19
Source : TestCreateTableProcedure.java
with Apache License 2.0
from fengchen8086
@After
public void tearDown() throws Exception {
resetProcExecutorTestingKillFlag();
for (HTableDescriptor htd : UTIL.getHBaseAdmin().listTables()) {
LOG.info("Tear down, remove table=" + htd.getTableName());
UTIL.deleteTable(htd.getTableName());
}
}
19
Source : MasterProcedureTestingUtility.java
with Apache License 2.0
from fengchen8086
public static HTableDescriptor createHTD(final TableName tableName, final String... family) {
HTableDescriptor htd = new HTableDescriptor(tableName);
for (int i = 0; i < family.length; ++i) {
htd.addFamily(new HColumnDescriptor(family[i]));
}
return htd;
}
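As a hedged usage sketch (the table and family names below are illustrative, not taken from the test suite), the helper above would be invoked like this to describe a table with two column families:
// Hypothetical call site for the createHTD helper shown above.
HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf("proc_demo"), "f1", "f2");
// htd now carries one HColumnDescriptor per family name passed in.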
19
Source : MasterProcedureTestingUtility.java
with Apache License 2.0
from fengchen8086
public static HRegionInfo[] createTable(final ProcedureExecutor<MasterProcedureEnv> procExec, final TableName tableName, final byte[][] splitKeys, String... family) throws IOException {
HTableDescriptor htd = createHTD(tableName, family);
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = ProcedureTestingUtility.submitAndWait(procExec, new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
return regions;
}
19
Source : TestEnableTableHandler.java
with Apache License 2.0
from fengchen8086
public static void createTable(HBaseTestingUtility testUtil, HBaseAdmin admin, HTableDescriptor htd, byte[][] splitKeys) throws Exception {
// NOTE: We need a latch because the admin call is not synchronous,
// so the postOp coprocessor method may be called after the admin operation has returned.
MasterSyncObserver observer = (MasterSyncObserver) testUtil.getHBaseCluster().getMaster().getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName());
observer.tableCreationLatch = new CountDownLatch(1);
if (splitKeys != null) {
admin.createTable(htd, splitKeys);
} else {
admin.createTable(htd);
}
observer.tableCreationLatch.await();
observer.tableCreationLatch = null;
testUtil.waitUntilAllRegionsAssigned(htd.getTableName());
}
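Because admin.createTable may return before the master coprocessor post-hook runs, callers go through this latch-based helper rather than calling the admin directly. A usage sketch (the table name, family, and split key are placeholders) could look like:
// Hypothetical call site: create a two-region table and block until the
// MasterSyncObserver latch confirms creation has fully completed.
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("enable_demo"));
htd.addFamily(new HColumnDescriptor("f"));
createTable(testUtil, admin, htd, new byte[][] { Bytes.toBytes("m") });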
19
Source : TestLoadIncrementalHFilesSplitRecovery.java
with Apache License 2.0
from fengchen8086
/**
* Creates a table with the given table name, specified number of column families<br>
* and splitkeys if the table does not already exist.
* @param table
* @param cfs
* @param SPLIT_KEYS
*/
private void setupTableWithSplitkeys(TableName table, int cfs, byte[][] SPLIT_KEYS) throws IOException {
try {
LOG.info("Creating table " + table);
HTableDescriptor htd = new HTableDescriptor(table);
for (int i = 0; i < cfs; i++) {
htd.addFamily(new HColumnDescriptor(family(i)));
}
util.createTable(htd, SPLIT_KEYS);
} catch (TableExistsException tee) {
LOG.info("Table " + table + " already exists");
}
}
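For illustration (the key values are placeholders), the split keys passed to the helper above are simply an ordered array of byte[] row boundaries; three keys produce a four-region table:
// Hypothetical split keys: the resulting regions are [ , b), [b, d), [d, f) and [f, ).
byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("d"), Bytes.toBytes("f") };
setupTableWithSplitkeys(TableName.valueOf("split_demo"), 2, SPLIT_KEYS);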
19
Source : TestLoadIncrementalHFiles.java
with Apache License 2.0
from fengchen8086
private void runTest(String testName, TableName tableName, BloomType bloomType, boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges) throws Exception {
HTableDescriptor htd = buildHTD(tableName, bloomType);
runTest(testName, htd, bloomType, preCreateTable, tableSplitKeys, hfileRanges);
}
19
Source : TestRegionObserverScannerOpenHook.java
with Apache License 2.0
from fengchen8086
Region initHRegion(byte[] tableName, String callingMethod, Configuration conf, byte[]... families) throws IOException {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
for (byte[] family : families) {
htd.addFamily(new HColumnDescriptor(family));
}
HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
Path path = new Path(DIR + callingMethod);
HRegion r = HRegion.createHRegion(info, path, conf, htd);
// this following piece is a hack. currently a coprocessorHost
// is secretly loaded at OpenRegionHandler. we don't really
// start a region server here, so just manually create cphost
// and set it to region.
RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
r.setCoprocessorHost(host);
return r;
}
19
Source : TestConstraints.java
with Apache License 2.0
from fengchen8086
/**
* Test that if a constraint hasn't been set that there are no problems with
* attempting to remove it.
*
* @throws Throwable
* on failure.
*/
@Test
public void testRemoveUnsetConstraint() throws Throwable {
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table"));
Constraints.remove(desc);
Constraints.remove(desc, AlsoWorks.class);
}
19
Source : TestSnapshotFromClient.java
with Apache License 2.0
from fengchen8086
@Before
public void setup() throws Exception {
HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
htd.setRegionReplication(getNumReplicas());
UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
}
19
Source : TestReplicasClient.java
with Apache License 2.0
from fengchen8086
@BeforeClreplaced
public static void beforeClreplaced() throws Exception {
// enable store file refreshing
HTU.getConfiguration().setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, REFRESH_PERIOD);
HTU.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true);
ConnectionUtils.setupMasterlessConnection(HTU.getConfiguration());
HTU.startMiniCluster(NB_SERVERS);
// Create table then get the single region for our new table.
HTableDescriptor hdt = HTU.createTableDescriptor(TestReplicasClient.class.getSimpleName());
hdt.addCoprocessor(SlowMeCopro.class.getName());
table = HTU.createTable(hdt, new byte[][] { f }, HTU.getConfiguration());
hriPrimary = table.getRegionLocation(row, false).getRegionInfo();
// mock a secondary region info to open
hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(), hriPrimary.getEndKey(), hriPrimary.isSplit(), hriPrimary.getRegionId(), 1);
// No master
LOG.info("Master is going to be stopped");
TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
Configuration c = new Configuration(HTU.getConfiguration());
c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
LOG.info("Master has stopped");
}
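The addCoprocessor call above registers an observer on the table descriptor before the table is created. A minimal sketch of the same idea on a plain HTableDescriptor (the table name and coprocessor class name are hypothetical) would be:
// Attach a (hypothetical) RegionObserver implementation at the descriptor level;
// region servers load it when the table's regions are opened.
HTableDescriptor hdt = new HTableDescriptor(TableName.valueOf("copro_demo"));
hdt.addFamily(new HColumnDescriptor("f"));
hdt.addCoprocessor("org.example.MyRegionObserver");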
19
Source : TestFromClientSide3.java
with Apache License 2.0
from fengchen8086
@Test
public void testGetEmptyRow() throws Exception {
// Create a table and put in 1 row
Admin admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("test")));
desc.addFamily(new HColumnDescriptor(FAMILY));
admin.createTable(desc);
Table table = new HTable(TEST_UTIL.getConfiguration(), desc.getTableName());
Put put = new Put(ROW_BYTES);
put.add(FAMILY, COL_QUAL, VAL_BYTES);
table.put(put);
// Try getting the row with an empty row key
Result res = null;
try {
res = table.get(new Get(new byte[0]));
fail();
} catch (IllegalArgumentException e) {
// Expected.
}
assertTrue(res == null);
res = table.get(new Get(Bytes.toBytes("r1-not-exist")));
assertTrue(res.isEmpty() == true);
res = table.get(new Get(ROW_BYTES));
assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES));
table.close();
}
19
Source : TestAdmin2.java
with Apache License 2.0
from fengchen8086
private HRegionServer startAndWriteData(TableName tableName, byte[] value) throws IOException, InterruptedException {
// When the hbase:meta table can be opened, the region servers are running
new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME).close();
// Create the test table and open it
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
admin.createTable(desc);
Table table = new HTable(TEST_UTIL.getConfiguration(), tableName);
HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);
for (int i = 1; i <= 256; i++) {
// 256 writes should cause 8 log rolls
Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", i)));
put.add(HConstants.CATALOG_FAMILY, null, value);
table.put(put);
if (i % 32 == 0) {
// After every 32 writes sleep to let the log roller run
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
// continue
}
}
}
table.close();
return regionServer;
}
19
Source : TestAdmin2.java
with Apache License 2.0
from fengchen8086
@After
public void tearDown() throws Exception {
for (HTableDescriptor htd : this.admin.listTables()) {
TEST_UTIL.deleteTable(htd.getName());
}
}
19
Source : ModifyRegionUtils.java
with Apache License 2.0
from fengchen8086
/**
* Create a new set of regions on the specified file-system.
* NOTE: you should add the regions to hbase:meta after this operation.
*
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
* @param hTableDescriptor description of the table
* @param newRegions {@link HRegionInfo} that describes the regions to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir, final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions, final RegionFillTask task) throws IOException {
Path tableDir = FSUtils.getTableDir(rootDir, hTableDescriptor.getTableName());
return createRegions(conf, rootDir, tableDir, hTableDescriptor, newRegions, task);
}
19
Source : ModifyRegionUtils.java
with Apache License 2.0
from fengchen8086
public static HRegionInfo[] createHRegionInfos(HTableDescriptor hTableDescriptor, byte[][] splitKeys) {
long regionId = System.currentTimeMillis();
HRegionInfo[] hRegionInfos = null;
if (splitKeys == null || splitKeys.length == 0) {
hRegionInfos = new HRegionInfo[] { new HRegionInfo(hTableDescriptor.getTableName(), null, null, false, regionId) };
} else {
int numRegions = splitKeys.length + 1;
hRegionInfos = new HRegionInfo[numRegions];
byte[] startKey = null;
byte[] endKey = null;
for (int i = 0; i < numRegions; i++) {
endKey = (i == splitKeys.length) ? null : splitKeys[i];
hRegionInfos[i] = new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey, false, regionId);
startKey = endKey;
}
}
return hRegionInfos;
}
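To make the boundary arithmetic above concrete: n split keys yield n + 1 HRegionInfo entries whose start and end keys chain through the keys, with the empty key marking the open ends. A short sketch (the table name is a placeholder):
// splitKeys {"b", "d"} -> regions [empty, b), [b, d), [d, empty)
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("regions_demo"));
byte[][] splitKeys = new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("d") };
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
// regions.length == 3; regions[0].getStartKey() and regions[2].getEndKey() are empty byte arrays.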
19
Source : ModifyRegionUtils.java
with Apache License 2.0
from fengchen8086
/**
* Create a new region on the specified file-system.
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
* @param tableDir table directory
* @param hTableDescriptor description of the table
* @param newRegion {@link HRegionInfo} that describes the region to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static HRegionInfo createRegion(final Configuration conf, final Path rootDir, final Path tableDir, final HTableDescriptor hTableDescriptor, final HRegionInfo newRegion, final RegionFillTask task) throws IOException {
// 1. Create HRegion
HRegion region = HRegion.createHRegion(newRegion, rootDir, tableDir, conf, hTableDescriptor, null, false, true);
try {
// 2. Custom user code to interact with the created region
if (task != null) {
task.fillRegion(region);
}
} finally {
// 3. Close the new region to flush to disk. Close log file too.
region.close();
}
return region.getRegionInfo();
}
19
Source : ModifyRegionUtils.java
with Apache License 2.0
from fengchen8086
/**
* Create a new set of regions on the specified file-system.
* NOTE: you should add the regions to hbase:meta after this operation.
*
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
* @param hTableDescriptor description of the table
* @param newRegions {@link HRegionInfo} that describes the regions to create
* @throws IOException
*/
public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir, final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions) throws IOException {
return createRegions(conf, rootDir, hTableDescriptor, newRegions, null);
}
19
Source : HBaseFsckRepair.java
with Apache License 2.0
from fengchen8086
/**
* Creates, flushes, and closes a new region.
*/
public static HRegion createHDFSRegionDir(Configuration conf, HRegionInfo hri, HTableDescriptor htd) throws IOException {
// Create HRegion
Path root = FSUtils.getRootDir(conf);
HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);
// Close the new region to flush to disk. Close log file too.
HRegion.closeHRegion(region);
return region;
}
19
Source : FSTableDescriptors.java
with Apache License 2.0
from fengchen8086
/**
* Implementation of {@link TableDescriptors} that reads descriptors from the
* passed filesystem. It expects descriptors to be in a file in the
* {@link #TABLEINFO_DIR} subdir of the table's directory in FS. Can be read-only
* -- i.e. does not modify the filesystem or can be read and write.
*
* <p>Also has utility for keeping up the table descriptors tableinfo file.
* The table schema file is kept in the {@link #TABLEINFO_DIR} subdir
* of the table directory in the filesystem.
* It has a {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the
* edit sequenceid: e.g. <code>.tableinfo.0000000003</code>. This sequenceid
* is always increasing. It starts at zero. The table schema file with the
* highest sequenceid has the most recent schema edit. Usually there is one file
* only, the most recent but there may be short periods where there are more
* than one file. Old files are eventually cleaned. Presumption is that there
* will not be lots of concurrent clients making table schema edits. If so,
* the below needs a bit of a reworking and perhaps some supporting api in hdfs.
*/
@InterfaceAudience.Private
public clreplaced FSTableDescriptors implements TableDescriptors {
private static final Log LOG = LogFactory.getLog(FSTableDescriptors.clreplaced);
private final FileSystem fs;
private final Path rootdir;
private final boolean fsreadonly;
private volatile boolean usecache;
private volatile boolean fsvisited;
@VisibleForTesting
long cachehits = 0;
@VisibleForTesting
long invocations = 0;
/**
* The file name prefix used to store HTD in HDFS
*/
static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
static final String TABLEINFO_DIR = ".tabledesc";
static final String TMP_DIR = ".tmp";
// This cache does not age out the old stuff. Thinking is that the amount
// of data we keep up in here is so small, no need to do occasional purge.
// TODO.
private final Map<TableName, HTableDescriptor> cache = new ConcurrentHashMap<TableName, HTableDescriptor>();
/**
* Table descriptor for <code>hbase:meta</code> catalog table
*/
private final HTableDescriptor metaTableDescriptor;
/**
* Construct a FSTableDescriptors instance using the hbase root dir of the given
* conf and the filesystem where that root dir lives.
* This instance can do write operations (is not read only).
*/
public FSTableDescriptors(final Configuration conf) throws IOException {
this(conf, FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
}
public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir) throws IOException {
this(conf, fs, rootdir, false, true);
}
/**
* @param fsreadonly True if we are read-only when it comes to filesystem
* operations; i.e. on remove, we do not do delete in fs.
*/
public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
super();
this.fs = fs;
this.rootdir = rootdir;
this.fsreadonly = fsreadonly;
this.usecache = usecache;
this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf);
}
public void setCacheOn() throws IOException {
this.cache.clear();
this.usecache = true;
}
public void setCacheOff() throws IOException {
this.usecache = false;
this.cache.clear();
}
@VisibleForTesting
public boolean isUsecache() {
return this.usecache;
}
/**
* Get the current table descriptor for the given table, or null if none exists.
*
* Uses a local cache of the descriptor but still checks the filesystem on each call
* to see if a newer file has been created since the cached one was read.
*/
@Override
public HTableDescriptor get(final TableName tablename) throws IOException {
invocations++;
if (TableName.META_TABLE_NAME.equals(tablename)) {
cachehits++;
return metaTableDescriptor;
}
// hbase:meta is already handled. If someone tries to get the descriptor for
// .logs, .oldlogs or .corrupt, throw an exception.
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
throw new IOException("No descriptor found for non table = " + tablename);
}
if (usecache) {
// Look in cache of descriptors.
HTableDescriptor cachedtdm = this.cache.get(tablename);
if (cachedtdm != null) {
cachehits++;
return cachedtdm;
}
}
HTableDescriptor tdmt = null;
try {
tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
} catch (NullPointerException e) {
LOG.debug("Exception during readTableDecriptor. Current table name = " + tablename, e);
} catch (IOException ioe) {
LOG.debug("Exception during readTableDecriptor. Current table name = " + tablename, ioe);
}
// last HTD written wins
if (usecache && tdmt != null) {
this.cache.put(tablename, tdmt);
}
return tdmt;
}
/**
* Returns a map from table name to table descriptor for all tables.
*/
@Override
public Map<String, HTableDescriptor> getAll() throws IOException {
Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
if (fsvisited && usecache) {
for (Map.Entry<TableName, HTableDescriptor> entry : this.cache.entrySet()) {
htds.put(entry.getKey().toString(), entry.getValue());
}
// add hbase:meta to the response
htds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(), HTableDescriptor.META_TABLEDESC);
} else {
LOG.debug("Fetching table descriptors from the filesystem.");
boolean allvisited = true;
for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
HTableDescriptor htd = null;
try {
htd = get(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
// inability of retrieving one HTD shouldn't stop getting the remaining
LOG.warn("Trouble retrieving htd", fnfe);
}
if (htd == null) {
allvisited = false;
continue;
} else {
htds.put(htd.getTableName().getNameAsString(), htd);
}
fsvisited = allvisited;
}
}
return htds;
}
/* (non-Javadoc)
* @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
*/
@Override
public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
List<Path> tableDirs = FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
for (Path d : tableDirs) {
HTableDescriptor htd = null;
try {
htd = get(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
// inability of retrieving one HTD shouldn't stop getting the remaining
LOG.warn("Trouble retrieving htd", fnfe);
}
if (htd == null)
continue;
htds.put(FSUtils.getTableName(d).getNameAsString(), htd);
}
return htds;
}
/**
* Adds (or updates) the table descriptor to the FileSystem
* and updates the local cache with it.
*/
@Override
public void add(HTableDescriptor htd) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
}
if (TableName.META_TABLE_NAME.equals(htd.getTableName())) {
throw new NotImplementedException();
}
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNamereplacedtring())) {
throw new NotImplementedException("Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNamereplacedtring());
}
updateTableDescriptor(htd);
}
/**
* Removes the table descriptor from the local cache and returns it.
* If not in read only mode, it also deletes the entire table directory(!)
* from the FileSystem.
*/
@Override
public HTableDescriptor remove(final TableName tablename) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
}
Path tabledir = getTableDir(tablename);
if (this.fs.exists(tabledir)) {
if (!this.fs.delete(tabledir, true)) {
throw new IOException("Failed delete of " + tabledir.toString());
}
}
HTableDescriptor descriptor = this.cache.remove(tablename);
if (descriptor == null) {
return null;
} else {
return descriptor;
}
}
/**
* Checks if a current table info file exists for the given table
*
* @param tableName name of table
* @return true if exists
* @throws IOException
*/
public boolean isTableInfoExists(TableName tableName) throws IOException {
return getTableInfoPath(tableName) != null;
}
/**
* Find the most current table info file for the given table in the hbase root directory.
* @return The file status of the current table info file or null if it does not exist
*/
private FileStatus getTableInfoPath(final TableName tableName) throws IOException {
Path tableDir = getTableDir(tableName);
return getTableInfoPath(tableDir);
}
private FileStatus getTableInfoPath(Path tableDir) throws IOException {
return getTableInfoPath(fs, tableDir, !fsreadonly);
}
/**
* Find the most current table info file for the table located in the given table directory.
*
* Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
* files and takes the 'current' one - meaning the one with the highest sequence number if present
* or no sequence number at all if none exist (for backward compatibility from before there
* were sequence numbers).
*
* @return The file status of the current table info file or null if it does not exist
* @throws IOException
*/
public static FileStatus getTableInfoPath(FileSystem fs, Path tableDir) throws IOException {
return getTableInfoPath(fs, tableDir, false);
}
/**
* Find the most current table info file for the table in the given table directory.
*
* Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
* files and takes the 'current' one - meaning the one with the highest sequence number if
* present or no sequence number at all if none exist (for backward compatibility from before
* there were sequence numbers).
* If there are multiple table info files found and removeOldFiles is true it also deletes the
* older files.
*
* @return The file status of the current table info file or null if none exist
* @throws IOException
*/
private static FileStatus getTableInfoPath(FileSystem fs, Path tableDir, boolean removeOldFiles) throws IOException {
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
return getCurrentTableInfoStatus(fs, tableInfoDir, removeOldFiles);
}
/**
* Find the most current table info file in the given directory
*
* Looks within the given directory for any table info files
* and takes the 'current' one - meaning the one with the highest sequence number if present
* or no sequence number at all if none exist (for backward compatibility from before there
* were sequence numbers).
* If there are multiple possible files found
* and the we're not in read only mode it also deletes the older files.
*
* @return The file status of the current table info file or null if it does not exist
* @throws IOException
*/
// only visible for FSTableDescriptorMigrationToSubdir, can be removed with that
static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir, boolean removeOldFiles) throws IOException {
FileStatus[] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
if (status == null || status.length < 1)
return null;
FileStatus mostCurrent = null;
for (FileStatus file : status) {
if (mostCurrent == null || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) {
mostCurrent = file;
}
}
if (removeOldFiles && status.length > 1) {
// Clean away old versions
for (FileStatus file : status) {
Path path = file.getPath();
if (file != mostCurrent) {
if (!fs.delete(file.getPath(), false)) {
LOG.warn("Failed cleanup of " + path);
} else {
LOG.debug("Cleaned up old tableinfo file " + path);
}
}
}
}
return mostCurrent;
}
/**
* Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in
* reverse order.
*/
@VisibleForTesting
static final Comparator<FileStatus> TABLEINFO_FILESTATUS_COMPARATOR = new Comparator<FileStatus>() {
@Override
public int compare(FileStatus left, FileStatus right) {
return right.compareTo(left);
}
};
/**
* Return the table directory in HDFS
*/
@VisibleForTesting
Path getTableDir(final TableName tableName) {
return FSUtils.getTableDir(rootdir, tableName);
}
private static final PathFilter TABLEINFO_PATHFILTER = new PathFilter() {
@Override
public boolean accept(Path p) {
// Accept any file that starts with TABLEINFO_NAME
return p.getName().startsWith(TABLEINFO_FILE_PREFIX);
}
};
/**
* Width of the sequenceid that is a suffix on a tableinfo file.
*/
@VisibleForTesting
static final int WIDTH_OF_SEQUENCE_ID = 10;
/*
* @param number Number to use as suffix.
* @return Returns zero-prefixed decimal version of passed
* number (Does absolute in case number is negative).
*/
private static String formatTableInfoSequenceId(final int number) {
byte[] b = new byte[WIDTH_OF_SEQUENCE_ID];
int d = Math.abs(number);
for (int i = b.length - 1; i >= 0; i--) {
b[i] = (byte) ((d % 10) + '0');
d /= 10;
}
return Bytes.toString(b);
}
/**
* Regex to eat up sequenceid suffix on a .tableinfo file.
* Use regex because may encounter oldstyle .tableinfos where there is no
* sequenceid on the end.
*/
private static final Pattern TABLEINFO_FILE_REGEX = Pattern.compile(TABLEINFO_FILE_PREFIX + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
/**
* @param p Path to a <code>.tableinfo</code> file.
* @return The current editid or 0 if none found.
*/
@VisibleForTesting
static int getTableInfoSequenceId(final Path p) {
if (p == null)
return 0;
Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName());
if (!m.matches())
throw new IllegalArgumentException(p.toString());
String suffix = m.group(2);
if (suffix == null || suffix.length() <= 0)
return 0;
return Integer.parseInt(m.group(2));
}
/**
* @param sequenceid
* @return Name of tableinfo file.
*/
@VisibleForTesting
static String getTableInfoFileName(final int sequenceid) {
return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceid);
}
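// Example (illustrative value): getTableInfoFileName(3) above returns ".tableinfo.0000000003",
// i.e. TABLEINFO_FILE_PREFIX plus a dot and the zero-padded, WIDTH_OF_SEQUENCE_ID-wide
// sequence id produced by formatTableInfoSequenceId.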
/**
* Returns the latest table descriptor for the given table directly from the file system
* if it exists, bypassing the local cache.
* Returns null if it's not found.
*/
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path hbaseRootDir, TableName tableName) throws IOException {
Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
return getTableDescriptorFromFs(fs, tableDir);
}
/**
* Returns the latest table descriptor for the table located at the given directory
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path hbaseRootDir, TableName tableName, boolean rewritePb) throws IOException {
Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
return getTableDescriptorFromFs(fs, tableDir, rewritePb);
}
/**
* Returns the latest table descriptor for the table located at the given directory
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir) throws IOException {
return getTableDescriptorFromFs(fs, tableDir, false);
}
/**
* Returns the latest table descriptor for the table located at the given directory
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir, boolean rewritePb) throws IOException {
FileStatus status = getTableInfoPath(fs, tableDir, false);
if (status == null) {
throw new TableInfoMissingException("No table descriptor file under " + tableDir);
}
return readTableDescriptor(fs, status, rewritePb);
}
private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status, boolean rewritePb) throws IOException {
int len = Ints.checkedCast(status.getLen());
byte[] content = new byte[len];
FSDataInputStream fsDataInputStream = fs.open(status.getPath());
try {
fsDataInputStream.readFully(content);
} finally {
fsDataInputStream.close();
}
HTableDescriptor htd = null;
try {
htd = HTableDescriptor.parseFrom(content);
} catch (DeserializationException e) {
// we have old HTableDescriptor here
try {
HTableDescriptor ohtd = HTableDescriptor.parseFrom(content);
LOG.warn("Found old table descriptor, converting to new format for table " + ohtd.getTableName());
htd = new HTableDescriptor(ohtd);
if (rewritePb)
rewriteTableDescriptor(fs, status, htd);
} catch (DeserializationException e1) {
throw new IOException("content=" + Bytes.toShort(content), e1);
}
}
if (rewritePb && !ProtobufUtil.isPBMagicPrefix(content)) {
// Convert the file over to be pb before leaving here.
rewriteTableDescriptor(fs, status, htd);
}
return htd;
}
private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus status, final HTableDescriptor td) throws IOException {
Path tableInfoDir = status.getPath().getParent();
Path tableDir = tableInfoDir.getParent();
writeTableDescriptor(fs, td, tableDir, status);
}
/**
* Update table descriptor on the file system
* @throws IOException Thrown if failed update.
* @throws NotImplementedException if in read only mode
*/
@VisibleForTesting
Path updateTableDescriptor(HTableDescriptor htd) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
}
Path tableDir = getTableDir(htd.getTableName());
Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir));
if (p == null)
throw new IOException("Failed update");
LOG.info("Updated tableinfo=" + p);
if (usecache) {
this.cache.put(htd.getTableName(), htd);
}
return p;
}
/**
* Deletes all the table descriptor files from the file system.
* Used in unit tests only.
* @throws NotImplementedException if in read only mode
*/
public void deleteTableDescriptorIfExists(TableName tableName) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
}
Path tableDir = getTableDir(tableName);
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
}
/**
* Deletes files matching the table info file pattern within the given directory
* whose sequenceId is at most the given max sequenceId.
*/
private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId) throws IOException {
FileStatus[] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
for (FileStatus file : status) {
Path path = file.getPath();
int sequenceId = getTableInfoSequenceId(path);
if (sequenceId <= maxSequenceId) {
boolean success = FSUtils.delete(fs, path, false);
if (success) {
LOG.debug("Deleted table descriptor at " + path);
} else {
LOG.error("Failed to delete descriptor at " + path);
}
}
}
}
/**
* Attempts to write a new table descriptor to the given table's directory.
* It first writes it to the .tmp dir then uses an atomic rename to move it into place.
* It begins at the currentSequenceId + 1 and tries 10 times to find a new sequence number
* not already in use.
* Removes the current descriptor file if passed in.
*
* @return Descriptor file or null if we failed write.
*/
private static Path writeTableDescriptor(final FileSystem fs, final HTableDescriptor htd, final Path tableDir, final FileStatus currentDescriptorFile) throws IOException {
// Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
// This directory is never removed to avoid removing it out from under a concurrent writer.
Path tmpTableDir = new Path(tableDir, TMP_DIR);
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
// What is current sequenceid? We read the current sequenceid from
// the current file. After we read it, another thread could come in and
// compete with us writing out next version of file. The below retries
// should help in this case some but its hard to do guarantees in face of
// concurrent schema edits.
int currentSequenceId = currentDescriptorFile == null ? 0 : getTableInfoSequenceId(currentDescriptorFile.getPath());
int newSequenceId = currentSequenceId;
// Put arbitrary upperbound on how often we retry
int retries = 10;
int retrymax = currentSequenceId + retries;
Path tableInfoDirPath = null;
do {
newSequenceId += 1;
String filename = getTableInfoFileName(newSequenceId);
Path tempPath = new Path(tmpTableDir, filename);
if (fs.exists(tempPath)) {
LOG.debug(tempPath + " exists; retrying up to " + retries + " times");
continue;
}
tableInfoDirPath = new Path(tableInfoDir, filename);
try {
writeHTD(fs, tempPath, htd);
fs.mkdirs(tableInfoDirPath.getParent());
if (!fs.rename(tempPath, tableInfoDirPath)) {
throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath);
}
LOG.debug("Wrote descriptor into: " + tableInfoDirPath);
} catch (IOException ioe) {
// Presume clash of names or something; go around again.
LOG.debug("Failed write and/or rename; retrying", ioe);
if (!FSUtils.deleteDirectory(fs, tempPath)) {
LOG.warn("Failed cleanup of " + tempPath);
}
tableInfoDirPath = null;
continue;
}
break;
} while (newSequenceId < retrymax);
if (tableInfoDirPath != null) {
// if we succeeded, remove old table info files.
deleteTableDescriptorFiles(fs, tableInfoDir, newSequenceId - 1);
}
return tableInfoDirPath;
}
private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd) throws IOException {
FSDataOutputStream out = fs.create(p, false);
try {
// We used to write this file out as a serialized HTD Writable followed by two '\n's and then
// the toString version of HTD. Now we just write out the pb serialization.
out.write(htd.toByteArray());
} finally {
out.close();
}
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
* Used by tests.
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
return createTableDescriptor(htd, false);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation) throws IOException {
Path tableDir = getTableDir(htd.getTableName());
return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
}
/**
* Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
* a new table or snapshot a table.
* @param tableDir table directory under which we should write the file
* @param htd description of the table to write
* @param forceCreation if <tt>true</tt>, then even if a previous table descriptor is present it will
* be overwritten
* @return <tt>true</tt> if we successfully created the file, <tt>false</tt> if the file
* already exists and we weren't forcing the descriptor creation.
* @throws IOException if a filesystem error occurs
*/
public boolean createTableDescriptorForTableDirectory(Path tableDir, HTableDescriptor htd, boolean forceCreation) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
}
FileStatus status = getTableInfoPath(fs, tableDir);
if (status != null) {
LOG.debug("Current tableInfoPath = " + status.getPath());
if (!forceCreation) {
if (fs.exists(status.getPath()) && status.getLen() > 0) {
if (readTableDescriptor(fs, status, false).equals(htd)) {
LOG.debug("TableInfo already exists.. Skipping creation");
return false;
}
}
}
}
Path p = writeTableDescriptor(fs, htd, tableDir, status);
return p != null;
}
}
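Tying the FSTableDescriptors pieces together, a hedged usage sketch (the table name is a placeholder; the configuration is assumed to point at a valid hbase.rootdir, and error handling is omitted) that writes a descriptor and reads it back through the cache could look like:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
public class FsTableDescriptorsSketch {
public static void main(String[] args) throws Exception {
Configuration conf = HBaseConfiguration.create();
// Uses the hbase root dir and filesystem derived from the configuration.
FSTableDescriptors fstd = new FSTableDescriptors(conf);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("fs_demo"));
htd.addFamily(new HColumnDescriptor("f"));
// Writes a .tableinfo.<seqid> file under the table directory and caches the descriptor.
fstd.createTableDescriptor(htd);
// Looks in the local cache but still checks the filesystem for a newer tableinfo file.
HTableDescriptor readBack = fstd.get(TableName.valueOf("fs_demo"));
System.out.println(readBack.getTableName());
}
}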