Here are examples of the Java API org.apache.hadoop.hdds.conf.OzoneConfiguration taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
558 Examples
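Before the extracted examples, here is a minimal orientation sketch (not taken from any of the projects below): it only shows constructing an OzoneConfiguration, which, like Hadoop's Configuration, picks up its resource files (ozone-default.xml, ozone-site.xml) from the classpath, then setting and reading back a property. The key and value are placeholders.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class OzoneConfigurationSketch {
  public static void main(String[] args) {
    // Picks up ozone-default.xml / ozone-site.xml from the classpath, as in the examples below.
    OzoneConfiguration conf = new OzoneConfiguration();
    // Placeholder key and value; any Ozone property can be set the same way.
    conf.set("ozone.metadata.dirs", "/tmp/ozone-meta");
    System.out.println(conf.get("ozone.metadata.dirs"));
  }
}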
19
Source : TestGenerateOzoneRequiredConfigurations.java
with Apache License 2.0
from apache
/**
* Tests a valid path and generates ozone-site.xml by calling
* {@code GenerateOzoneRequiredConfigurations#generateConfigurations}.
* Further verifies that all properties have a default value.
*
* @throws Exception
*/
@Test
public void testGenerateConfigurations() throws Exception {
File tempPath = getRandomTempDir();
String[] args = new String[] { tempPath.getAbsolutePath() };
execute(args, "ozone-site.xml has been generated at " + tempPath.getAbsolutePath());
// Fetch file generated by above line
URL url = new File(tempPath.getAbsolutePath() + "/ozone-site.xml").toURI().toURL();
OzoneConfiguration oc = new OzoneConfiguration();
List<OzoneConfiguration.Property> allProperties = oc.readPropertyFromXml(url);
// Asserts all properties have a non-empty value
for (OzoneConfiguration.Property p : allProperties) {
Assert.assertTrue(p.getValue() != null && p.getValue().length() > 0);
}
}
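A short follow-on sketch (not from the project) showing the same readPropertyFromXml call outside the test harness; it assumes java.io.File, java.net.URL and java.util.List are imported, and the path to the generated ozone-site.xml is a placeholder.
// Placeholder path; point this at wherever ozone-site.xml was generated.
URL url = new File("/tmp/ozone-conf/ozone-site.xml").toURI().toURL();
OzoneConfiguration oc = new OzoneConfiguration();
List<OzoneConfiguration.Property> properties = oc.readPropertyFromXml(url);
for (OzoneConfiguration.Property p : properties) {
  System.out.println(p.getName() + " = " + p.getValue());
}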
19
Source : OzoneAddress.java
with Apache License 2.0
from apache
/**
* Create OzoneClient for S3Commands.
*
* @param conf
* @param omServiceID
* @return OzoneClient
* @throws IOException
* @throws OzoneClientException
*/
public OzoneClient createClientForS3Commands(OzoneConfiguration conf, String omServiceID) throws IOException, OzoneClientException {
if (omServiceID != null) {
// OM HA cluster
if (OmUtils.isOmHAServiceId(conf, omServiceID)) {
return OzoneClientFactory.getRpcClient(omServiceID, conf);
} else {
throw new OzoneClientException("Service ID specified does not match" + " with " + OZONE_OM_SERVICE_IDS_KEY + " defined in the " + "configuration. Configured " + OZONE_OM_SERVICE_IDS_KEY + " are" + conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY));
}
} else {
// If the OM service id is not specified, consider it a non-HA cluster.
// But before that, check whether any serviceId is defined; if so,
// throw an error because the OM service ID needs to be specified.
if (OmUtils.isServiceIdsDefined(conf)) {
throw new OzoneClientException("Service ID must not" + " be omitted when " + OZONE_OM_SERVICE_IDS_KEY + " is defined. " + "Configured " + OZONE_OM_SERVICE_IDS_KEY + " are " + conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY));
}
return OzoneClientFactory.getRpcClient(conf);
}
}
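A hedged caller sketch for the method above (hypothetical, not part of the project): obtaining a client for S3 commands with no OM service ID, i.e. the non-HA path. It assumes an enclosing method that declares IOException and OzoneClientException.
OzoneConfiguration conf = new OzoneConfiguration();
OzoneAddress address = new OzoneAddress();
// Pass a configured OM service ID for an HA cluster, or null for a non-HA cluster.
try (OzoneClient client = address.createClientForS3Commands(conf, null)) {
  ObjectStore store = client.getObjectStore();
  // issue volume/bucket/key operations through store here
}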
19
Source : Handler.java
with Apache License 2.0
from apache
/**
* Base class for shell commands that connect via Ozone client.
*/
@Command(mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class)
// CLI
@SuppressWarnings("squid:S106")
public abstract class Handler implements Callable<Void> {
protected static final Logger LOG = LoggerFactory.getLogger(Handler.class);
private OzoneConfiguration conf;
@ParentCommand
private GenericParentCommand parent;
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec;
public boolean isVerbose() {
return parent.isVerbose();
}
public OzoneConfiguration createOzoneConfiguration() {
return parent.createOzoneConfiguration();
}
protected OzoneAddress getAddress() throws OzoneClientException {
return new OzoneAddress();
}
protected abstract void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException;
/**
* Checks whether the current command should be executed or not.
* If it is skipped, an informational message should be output.
* Eg. some commands only work in secure clusters.
*
* @return true if the command should be executed
*/
protected boolean isApplicable() {
return true;
}
@Override
public Void call() throws Exception {
conf = createOzoneConfiguration();
if (!isApplicable()) {
return null;
}
OzoneAddress address = getAddress();
try (OzoneClient client = createClient(address)) {
if (isVerbose()) {
address.print(out());
}
execute(client, address);
}
return null;
}
protected OzoneClient createClient(OzoneAddress address) throws IOException, OzoneClientException {
return address.createClient(conf);
}
protected boolean securityEnabled() {
boolean enabled = OzoneSecurityUtil.isSecurityEnabled(conf);
if (!enabled) {
err().printf("Error: '%s' operation works only when security is " + "enabled. To enable security set ozone.security.enabled to " + "true.%n", spec.qualifiedName());
}
return enabled;
}
protected void printObjectAsJson(Object o) throws IOException {
out().println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(o));
}
protected void printMsg(String msg) {
out().println(msg);
}
protected OzoneConfiguration getConf() {
return conf;
}
protected PrintStream out() {
return System.out;
}
protected PrintStream err() {
return System.err;
}
}
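A minimal, hypothetical subclass sketch (not part of the project) showing how the abstract Handler above is typically extended: the base class builds the configuration and client in call(), so only execute has to be implemented.
@Command(name = "example-info", description = "Hypothetical command built on Handler.")
public class ExampleInfoHandler extends Handler {
  @Override
  protected void execute(OzoneClient client, OzoneAddress address)
      throws IOException, OzoneClientException {
    // The configuration and client are already prepared by Handler.call() at this point.
    printMsg("Hypothetical command executed; verbose=" + isVerbose());
  }
}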
19
Source : SQLCLI.java
with Apache License 2.0
from apache
/**
* This is the CLI that can be used to convert an ozone metadata DB into
* a sqlite DB file.
*
* NOTE: users should use this CLI in an offline fashion. Namely, it should not
* be used to convert a DB that is currently being used by Ozone. Instead,
* it should be used to debug and diagnose closed DB instances.
*/
public class SQLCLI extends Configured implements Tool {
private Options options;
private BasicParser parser;
private final OzoneConfiguration conf;
// for container.db
private static final String CREATE_CONTAINER_INFO = "CREATE TABLE containerInfo (" + "containerID LONG PRIMARY KEY NOT NULL, " + "replicationType TEXT NOT NULL," + "replicationFactor TEXT NOT NULL," + "usedBytes LONG NOT NULL," + "owner TEXT," + "numberOfKeys LONG)";
private static final String CREATE_DATANODE_INFO = "CREATE TABLE datanodeInfo (" + "hostName TEXT NOT NULL, " + "datanodeUUId TEXT PRIMARY KEY NOT NULL," + "ipAddress TEXT, " + "containerPort INTEGER NOT NULL);";
private static final String INSERT_CONTAINER_INFO = "INSERT INTO containerInfo (containerID, replicationType, " + "replicationFactor, usedBytes, owner, " + "numberOfKeys) VALUES (\"%d\", \"%s\", \"%s\", \"%d\", " + "\"%s\", \"%d\")";
private static final String INSERT_DATANODE_INFO = "INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " + "containerPort) " + "VALUES (\"%s\", \"%s\", \"%s\", \"%d\")";
private static final String INSERT_CONTAINER_MEMBERS = "INSERT INTO containerMembers (containerName, datanodeUUID) " + "VALUES (\"%s\", \"%s\")";
// and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO
// for openContainer.db
private static final String CREATE_OPEN_CONTAINER = "CREATE TABLE openContainer (" + "containerName TEXT PRIMARY KEY NOT NULL, " + "containerUsed INTEGER NOT NULL)";
private static final String INSERT_OPEN_CONTAINER = "INSERT INTO openContainer (containerName, containerUsed) " + "VALUES (\"%s\", \"%s\")";
// for om.db
private static final String CREATE_VOLUME_LIST = "CREATE TABLE volumeList (" + "userName TEXT NOT NULL," + "volumeName TEXT NOT NULL," + "PRIMARY KEY (userName, volumeName))";
private static final String INSERT_VOLUME_LIST = "INSERT INTO volumeList (userName, volumeName) " + "VALUES (\"%s\", \"%s\")";
private static final String CREATE_VOLUME_INFO = "CREATE TABLE volumeInfo (" + "adminName TEXT NOT NULL," + "ownerName TEXT NOT NULL," + "volumeName TEXT NOT NULL," + "PRIMARY KEY (adminName, ownerName, volumeName))";
private static final String INSERT_VOLUME_INFO = "INSERT INTO volumeInfo (adminName, ownerName, volumeName) " + "VALUES (\"%s\", \"%s\", \"%s\")";
private static final String CREATE_ACL_INFO = "CREATE TABLE aclInfo (" + "adminName TEXT NOT NULL," + "ownerName TEXT NOT NULL," + "volumeName TEXT NOT NULL," + "type TEXT NOT NULL," + "userName TEXT NOT NULL," + "rights TEXT NOT NULL," + "FOREIGN KEY (adminName, ownerName, volumeName, userName, type)" + "REFERENCES " + "volumeInfo(adminName, ownerName, volumeName, userName, type)" + "PRIMARY KEY (adminName, ownerName, volumeName, userName, type))";
private static final String INSERT_ACL_INFO = "INSERT INTO aclInfo (adminName, ownerName, volumeName, type, " + "userName, rights) " + "VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")";
private static final String CREATE_BUCKET_INFO = "CREATE TABLE bucketInfo (" + "volumeName TEXT NOT NULL," + "bucketName TEXT NOT NULL," + "versionEnabled BOOLEAN NOT NULL," + "storageType TEXT," + "PRIMARY KEY (volumeName, bucketName))";
private static final String INSERT_BUCKET_INFO = "INSERT INTO bucketInfo(volumeName, bucketName, " + "versionEnabled, storageType)" + "VALUES (\"%s\", \"%s\", \"%s\", \"%s\")";
private static final String CREATE_KEY_INFO = "CREATE TABLE keyInfo (" + "volumeName TEXT NOT NULL," + "bucketName TEXT NOT NULL," + "keyName TEXT NOT NULL," + "dataSize INTEGER," + "blockKey TEXT NOT NULL," + "containerName TEXT NOT NULL," + "PRIMARY KEY (volumeName, bucketName, keyName))";
private static final String INSERT_KEY_INFO = "INSERT INTO keyInfo (volumeName, bucketName, keyName, dataSize, " + "blockKey, containerName)" + "VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")";
private static final Logger LOG = LoggerFactory.getLogger(SQLCLI.class);
public SQLCLI(OzoneConfiguration conf) {
this.options = getOptions();
this.parser = new BasicParser();
this.conf = conf;
}
@SuppressWarnings("static-access")
private Options getOptions() {
Options allOptions = new Options();
Option helpOpt = OptionBuilder.hasArg(false).withLongOpt("help").withDescription("display help message").create("h");
allOptions.addOption(helpOpt);
Option dbPathOption = OptionBuilder.withArgName("DB path").withLongOpt("dbPath").hasArgs(1).withDescription("specify DB path").create("p");
allOptions.addOption(dbPathOption);
Option outPathOption = OptionBuilder.withArgName("output path").withLongOpt("outPath").hasArgs(1).withDescription("specify output DB file path").create("o");
allOptions.addOption(outPathOption);
return allOptions;
}
public void displayHelp() {
HelpFormatter helpFormatter = new HelpFormatter();
Options allOpts = getOptions();
helpFormatter.printHelp("hdfs oz_debug -p <DB path>" + " -o <Output DB file path>", allOpts);
}
@Override
public int run(String[] args) throws Exception {
CommandLine commandLine = parseArgs(args);
if (commandLine.hasOption("help")) {
displayHelp();
return 0;
}
if (!commandLine.hasOption("p") || !commandLine.hasOption("o")) {
displayHelp();
return -1;
}
String value = commandLine.getOptionValue("p");
LOG.info("DB path {}", value);
// the value is supposed to be an absolute path to a container file
Path dbPath = Paths.get(value);
if (!Files.exists(dbPath)) {
LOG.error("DB path not exist:{}", dbPath);
}
Path parentPath = dbPath.getParent();
Path dbName = dbPath.getFileName();
if (parentPath == null || dbName == null) {
LOG.error("Error processing db path {}", dbPath);
return -1;
}
value = commandLine.getOptionValue("o");
Path outPath = Paths.get(value);
if (outPath == null || outPath.getParent() == null) {
LOG.error("Error processing output path {}", outPath);
return -1;
}
if (outPath.toFile().isDirectory()) {
LOG.error("The db output path should be a file instead of a directory");
return -1;
}
Path outParentPath = outPath.getParent();
if (outParentPath != null) {
if (!Files.exists(outParentPath)) {
Files.createDirectories(outParentPath);
}
}
LOG.info("Parent path [{}] db name [{}]", parentPath, dbName);
if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) {
LOG.info("Converting container DB");
convertContainerDB(dbPath, outPath);
} else if (dbName.toString().equals(OM_DB_NAME)) {
LOG.info("Converting om DB");
convertOMDB(dbPath, outPath);
} else {
LOG.error("Unrecognized db name {}", dbName);
}
return 0;
}
private Connection connectDB(String dbPath) throws Exception {
Class.forName("org.sqlite.JDBC");
String connectPath = String.format("jdbc:sqlite:%s", dbPath);
return DriverManager.getConnection(connectPath);
}
private void executeSQL(Connection conn, String sql) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.executeUpdate(sql);
}
}
/**
* Convert om.db to a sqlite db file, with the following schema.
* (* for primary key)
*
* 1. for key type USER, it contains a username and a list of volumes
* volumeList
* --------------------------------
* userName* | volumeName*
* --------------------------------
*
* 2. for key type VOLUME:
*
* volumeInfo
* ----------------------------------------------
* adminName | ownerName* | volumeName* | aclID
* ----------------------------------------------
*
* aclInfo
* ----------------------------------------------
* aclEntryID* | type* | userName* | rights
* ----------------------------------------------
*
* 3. for key type BUCKET
* bucketInfo
* --------------------------------------------------------
* volumeName* | bucketName* | versionEnabled | storageType
* --------------------------------------------------------
*
* TODO : the following table will be changed when key partition is added.
* Only has the minimum entries for test purposes now.
* 4. for key type KEY
* -----------------------------------------------
* volumeName* | bucketName* | keyName* | dataSize
* -----------------------------------------------
*
* @param dbPath
* @param outPath
* @throws Exception
*/
private void convertOMDB(Path dbPath, Path outPath) throws Exception {
LOG.info("Create tables for sql om db.");
File dbFile = dbPath.toFile();
try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf).setDbFile(dbFile).build();
Connection conn = connectDB(outPath.toString())) {
executeSQL(conn, CREATE_VOLUME_LIST);
executeSQL(conn, CREATE_VOLUME_INFO);
executeSQL(conn, CREATE_ACL_INFO);
executeSQL(conn, CREATE_BUCKET_INFO);
executeSQL(conn, CREATE_KEY_INFO);
dbStore.iterate(null, (key, value) -> {
String keyString = StringUtils.bytes2String(key);
KeyType type = getKeyType(keyString);
try {
insertOMDB(conn, type, keyString, value);
} catch (IOException | SQLException ex) {
LOG.error("Exception inserting key {} type {}", keyString, type, ex);
}
return true;
});
}
}
private void insertOMDB(Connection conn, KeyType type, String keyName, byte[] value) throws IOException, SQLException {
switch(type) {
case USER:
UserVolumeInfo volumeList = UserVolumeInfo.parseFrom(value);
for (String volumeName : volumeList.getVolumeNamesList()) {
String insertVolumeList = String.format(INSERT_VOLUME_LIST, keyName, volumeName);
executeSQL(conn, insertVolumeList);
}
break;
case VOLUME:
VolumeInfo volumeInfo = VolumeInfo.parseFrom(value);
String adminName = volumeInfo.getAdminName();
String ownerName = volumeInfo.getOwnerName();
String volumeName = volumeInfo.getVolume();
String insertVolumeInfo = String.format(INSERT_VOLUME_INFO, adminName, ownerName, volumeName);
executeSQL(conn, insertVolumeInfo);
for (OzoneAclInfo aclInfo : volumeInfo.getVolumeAclsList()) {
String insertAclInfo = String.format(INSERT_ACL_INFO, adminName, ownerName, volumeName, aclInfo.getType(), aclInfo.getName(), aclInfo.getRights());
executeSQL(conn, insertAclInfo);
}
break;
case BUCKET:
BucketInfo bucketInfo = BucketInfo.parseFrom(value);
String insertBucketInfo = String.format(INSERT_BUCKET_INFO, bucketInfo.getVolumeName(), bucketInfo.getBucketName(), bucketInfo.getIsVersionEnabled(), bucketInfo.getStorageType());
executeSQL(conn, insertBucketInfo);
break;
case KEY:
KeyInfo keyInfo = KeyInfo.parseFrom(value);
// TODO : the two fields container name and block id are no longer used,
// need to revisit this later.
String insertKeyInfo = String.format(INSERT_KEY_INFO, keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName(), keyInfo.getDataSize(), "EMPTY", "EMPTY");
executeSQL(conn, insertKeyInfo);
break;
default:
throw new IOException("Unknown key from om.db");
}
}
// TODO: This has to be fixed.
// we don't have prefix anymore. now each key is written into different
// table. The logic has to be changed.
private KeyType getKeyType(String key) {
if (key.startsWith(OM_USER_PREFIX)) {
return KeyType.USER;
} else if (key.startsWith(OM_KEY_PREFIX)) {
return key.replaceFirst(OM_KEY_PREFIX, "").contains(OM_KEY_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME;
} else {
return KeyType.KEY;
}
}
private enum KeyType {
USER, VOLUME, BUCKET, KEY, UNKNOWN
}
/**
* Convert container.db to sqlite. The schema of sql db:
* three tables, containerId, containerMachines, datanodeInfo
* (* for primary key)
*
* containerInfo:
* ----------------------------------------------
* container name* | container lead datanode uuid
* ----------------------------------------------
*
* containerMembers:
* --------------------------------
* container name* | datanodeUUid*
* --------------------------------
*
* datanodeInfo:
* ---------------------------------------------------------
* hostname | datanodeUUid* | xferPort | ipcPort
* ---------------------------------------------------------
*
* --------------------------------
* | containerPort
* --------------------------------
*
* @param dbPath path to container db.
* @param outPath path to output sqlite
* @throws IOException throws exception.
*/
private void convertContainerDB(Path dbPath, Path outPath) throws Exception {
LOG.info("Create tables for sql container db.");
File dbFile = dbPath.toFile();
try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf).setDbFile(dbFile).build();
Connection conn = connectDB(outPath.toString())) {
executeSQL(conn, CREATE_CONTAINER_INFO);
dbStore.iterate(null, (key, value) -> {
long containerID = Longs.fromByteArray(key);
ContainerInfo containerInfo = null;
containerInfo = ContainerInfo.fromProtobuf(HddsProtos.ContainerInfoProto.PARSER.parseFrom(value));
Preconditions.checkNotNull(containerInfo);
try {
// TODO: include container state to sqllite schema
insertContainerDB(conn, containerInfo, containerID);
return true;
} catch (SQLException e) {
throw new IOException(e);
}
});
}
}
/**
* Insert into the sqlite DB of container.db.
* @param conn the connection to the sqlite DB.
* @param containerInfo
* @param containerID
* @throws SQLException throws exception.
*/
private void insertContainerDB(Connection conn, ContainerInfo containerInfo, long containerID) throws SQLException {
LOG.info("Insert to sql container db, for container {}", containerID);
String insertContainerInfo = String.format(INSERT_CONTAINER_INFO, containerID, containerInfo.getReplicationType(), containerInfo.getReplicationFactor(), containerInfo.getUsedBytes(), containerInfo.getOwner(), containerInfo.getNumberOfKeys());
executeSQL(conn, insertContainerInfo);
LOG.info("Insertion completed.");
}
/**
* Convert openContainer.db to sqlite db file. This is rather simple db,
* the schema has only one table:
*
* openContainer
* -------------------------------
* containerName* | containerUsed
* -------------------------------
*
* @param dbPath path to container db.
* @param outPath path to output sqlite
* @throws IOException throws exception.
*/
private void convertOpenContainerDB(Path dbPath, Path outPath) throws Exception {
LOG.info("Create table for open container db.");
File dbFile = dbPath.toFile();
try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf).setDbFile(dbFile).build();
Connection conn = connectDB(outPath.toString())) {
executeSQL(conn, CREATE_OPEN_CONTAINER);
dbStore.iterate(null, (key, value) -> {
String containerName = StringUtils.bytes2String(key);
Long containerUsed = Long.parseLong(StringUtils.bytes2String(value));
String insertOpenContainer = String.format(INSERT_OPEN_CONTAINER, containerName, containerUsed);
try {
executeSQL(conn, insertOpenContainer);
return true;
} catch (SQLException e) {
throw new IOException(e);
}
});
}
}
private CommandLine parseArgs(String[] argv) throws ParseException {
return parser.parse(options, argv);
}
public static void main(String[] args) {
Tool shell = new SQLCLI(new OzoneConfiguration());
int res = 0;
try {
ToolRunner.run(shell, args);
} catch (Exception ex) {
LOG.error(ex.toString());
if (LOG.isDebugEnabled()) {
LOG.debug("Command execution failed", ex);
}
res = 1;
}
System.exit(res);
}
}
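A hedged launcher sketch (not from the project) showing how the SQLCLI above is usually driven through ToolRunner, with the -p and -o options from displayHelp; both paths are placeholders and, per the class javadoc, should point at a closed, offline copy of the DB.
OzoneConfiguration conf = new OzoneConfiguration();
Tool converter = new SQLCLI(conf);
// Placeholder paths: a copied, closed om.db and the sqlite file to create.
int exitCode = ToolRunner.run(converter, new String[] { "-p", "/tmp/copied/om.db", "-o", "/tmp/om.sqlite" });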
19
Source : GenesisUtil.java
with Apache License 2.0
from apache
static void configureSCM(OzoneConfiguration conf, int numHandlers) {
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, RANDOM_LOCAL_ADDRESS);
conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, RANDOM_LOCAL_ADDRESS);
conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, RANDOM_LOCAL_ADDRESS);
conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, RANDOM_LOCAL_ADDRESS);
conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numHandlers);
}
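A brief usage sketch for configureSCM, mirroring the benchmark setup that appears later in this listing; the metadata directory is a placeholder.
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, "/tmp/scm-bench-meta");
// Binds the SCM client, block, datanode and HTTP endpoints to random local addresses
// and sets the handler count to 10.
GenesisUtil.configureSCM(conf, 10);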
19
Source : GenesisUtil.java
with Apache License 2.0
from apache
public static MetadataStore getMetadataStore(String dbType) throws IOException {
OzoneConfiguration conf = new OzoneConfiguration();
MetadataStoreBuilder builder = MetadataStoreBuilder.newBuilder();
builder.setConf(conf);
builder.setCreateIfMissing(true);
builder.setDbFile(getTempPath().resolve(RandomStringUtils.randomNumeric(DB_FILE_LEN)).toFile());
switch(dbType) {
case DEFAULT_TYPE:
break;
case CLOSED_TYPE:
break;
case CACHE_10MB_TYPE:
builder.setCacheSize((long) StorageUnit.MB.toBytes(10));
break;
case CACHE_1GB_TYPE:
builder.setCacheSize((long) StorageUnit.GB.toBytes(1));
break;
default:
throw new IllegalStateException("Unknown type: " + dbType);
}
return builder.build();
}
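A hedged consumer sketch for the store returned above; it assumes the DEFAULT_TYPE constant is reachable from the caller (its visibility is not shown in this listing) and an enclosing method that can throw, and simply iterates whatever the store contains.
try (MetadataStore store = GenesisUtil.getMetadataStore(GenesisUtil.DEFAULT_TYPE)) {
  store.iterate(null, (key, value) -> {
    System.out.println(StringUtils.bytes2String(key));
    return true;
  });
}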
19
Source : GenesisUtil.java
with Apache License 2.0
from apache
static OzoneManager getOm(OzoneConfiguration conf) throws IOException, AuthenticationException {
OMStorage omStorage = new OMStorage(conf);
SCMStorageConfig scmStore = new SCMStorageConfig(conf);
if (omStorage.getState() != Storage.StorageState.INITIALIZED) {
omStorage.setClusterId(scmStore.getClusterID());
omStorage.setScmId(scmStore.getScmId());
omStorage.setOmId(UUID.randomUUID().toString());
omStorage.initialize();
}
return OzoneManager.createOm(conf);
}
19
Source : GenesisUtil.java
with Apache License 2.0
from apache
static void configureOM(OzoneConfiguration conf, int numHandlers) {
conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, RANDOM_LOCAL_ADDRESS);
conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numHandlers);
}
19
Source : GenesisUtil.java
with Apache License 2.0
from apache
static StorageContainerManager getScm(OzoneConfiguration conf, SCMConfigurator configurator) throws IOException, AuthenticationException {
SCMStorageConfig scmStore = new SCMStorageConfig(conf);
if (scmStore.getState() != Storage.StorageState.INITIALIZED) {
String clusterId = UUID.randomUUID().toString();
String scmId = UUID.randomUUID().toString();
scmStore.setClusterId(clusterId);
scmStore.setScmId(scmId);
// writes the version file properties
scmStore.initialize();
}
return new StorageContainerManager(conf, configurator);
}
19
Source : BenchMarkSCM.java
with Apache License 2.0
from apache
@Setup(Level.Trial)
public static void initialize() throws Exception {
try {
lock.lock();
if (scm == null) {
OzoneConfiguration conf = new OzoneConfiguration();
testDir = GenesisUtil.getTempPath().resolve(RandomStringUtils.randomNumeric(7)).toString();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir);
GenesisUtil.configureSCM(conf, 10);
conf.setInt(OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numContainersPerPipeline);
GenesisUtil.addPipelines(ReplicationFactor.THREE, numPipelines, conf);
scm = GenesisUtil.getScm(conf, new SCMConfigurator());
scm.start();
blockManager = scm.getScmBlockManager();
// prepare SCM
PipelineManager pipelineManager = scm.getPipelineManager();
for (Pipeline pipeline : pipelineManager.getPipelines(ReplicationType.RATIS, ReplicationFactor.THREE)) {
pipelineManager.openPipeline(pipeline.getId());
}
scm.getEventQueue().fireEvent(SCMEvents.SAFE_MODE_STATUS, new SCMSafeModeManager.SafeModeStatus(false, false));
Thread.sleep(1000);
}
} finally {
lock.unlock();
}
}
19
Source : BenchMarkOzoneManager.java
with Apache License 2.0
from apache
@Setup(Level.Trial)
public static void initialize() throws Exception {
try {
lock.lock();
if (scm == null) {
OzoneConfiguration conf = new OzoneConfiguration();
testDir = GenesisUtil.getTempPath().resolve(RandomStringUtils.randomNumeric(7)).toString();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir);
GenesisUtil.configureSCM(conf, 10);
GenesisUtil.configureOM(conf, 20);
conf.setInt(OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numContainersPerPipeline);
GenesisUtil.addPipelines(ReplicationFactor.THREE, numPipelines, conf);
scm = GenesisUtil.getScm(conf, new SCMConfigurator());
scm.start();
om = GenesisUtil.getOm(conf);
om.start();
// prepare SCM
PipelineManager pipelineManager = scm.getPipelineManager();
for (Pipeline pipeline : pipelineManager.getPipelines(ReplicationType.RATIS, ReplicationFactor.THREE)) {
pipelineManager.openPipeline(pipeline.getId());
}
scm.getEventQueue().fireEvent(SCMEvents.SAFE_MODE_STATUS, new SCMSafeModeManager.SafeModeStatus(false, false));
Thread.sleep(1000);
// prepare OM
om.createVolume(new OmVolumeArgs.Builder().setVolume(volumeName).setAdminName(UserGroupInformation.getLoginUser().getUserName()).setOwnerName(UserGroupInformation.getLoginUser().getUserName()).build());
om.createBucket(new OmBucketInfo.Builder().setBucketName(bucketName).setVolumeName(volumeName).build());
createKeys(100000);
}
} finally {
lock.unlock();
}
}
19
Source : BenchMarkOMKeyAllocation.java
with Apache License 2.0
from apache
@Setup(Level.Trial)
public void setup() throws IOException {
OzoneConfiguration configuration = new OzoneConfiguration();
configuration.set(OMConfigKeys.OZONE_OM_DB_DIRS, path);
OmMetadataManagerImpl omMetadataManager = new OmMetadataManagerImpl(configuration);
volumeManager = new VolumeManagerImpl(omMetadataManager, configuration);
bucketManager = new BucketManagerImpl(omMetadataManager);
volumeManager.createVolume(new OmVolumeArgs.Builder().setVolume(volumeName).setAdminName(UserGroupInformation.getLoginUser().getUserName()).setOwnerName(UserGroupInformation.getLoginUser().getUserName()).build());
bucketManager.createBucket(new OmBucketInfo.Builder().setBucketName(bucketName).setVolumeName(volumeName).build());
keyManager = new KeyManagerImpl(null, omMetadataManager, configuration, UUID.randomUUID().toString(), null);
}
19
Source : BenchMarkOMKeyAllocation.java
with Apache License 2.0
from apache
@Benchmark
public void keyCreation() throws IOException {
OzoneConfiguration configuration = new OzoneConfiguration();
configuration.set(OMConfigKeys.OZONE_OM_DB_DIRS, path);
List<OmKeyLocationInfo> keyLocationInfos = getKeyInfoList();
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(UUID.randomUUID().toString()).setDataSize(0).setFactor(HddsProtos.ReplicationFactor.THREE).setType(HddsProtos.ReplicationType.RATIS).build();
OpenKeySession openKeySession = keyManager.openKey(omKeyArgs);
// setting location info list
omKeyArgs.setLocationInfoList(keyLocationInfos);
keyManager.commitKey(omKeyArgs, openKeySession.getId());
}
19
Source : BenchMarkDatanodeDispatcher.java
with Apache License 2.0
from apache
@Setup(Level.Trial)
public void initialize() throws IOException {
datanodeUuid = UUID.randomUUID().toString();
// 1 MB of data
data = ByteString.copyFromUtf8(RandomStringUtils.randomAscii(CHUNK_SIZE));
random = new Random();
OzoneConfiguration conf = new OzoneConfiguration();
baseDir = System.getProperty("java.io.tmpdir") + File.separator + datanodeUuid;
// data directory
conf.set("dfs.datanode.data.dir", baseDir + File.separator + "data");
// We need 100 * container size minimum space
conf.set("ozone.scm.container.size", "10MB");
ContainerSet containerSet = new ContainerSet();
volumeSet = new MutableVolumeSet(datanodeUuid, conf);
StateContext context = new StateContext(conf, DatanodeStates.RUNNING, null);
ContainerMetrics metrics = ContainerMetrics.create(conf);
Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
Handler handler = Handler.getHandlerForContainerType(containerType, conf, "datanodeid", containerSet, volumeSet, metrics, c -> {
});
handler.setScmID("scm");
handlers.put(containerType, handler);
}
dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
dispatcher.init();
containerCount = new AtomicInteger();
keyCount = new AtomicInteger();
chunkCount = new AtomicInteger();
containers = new ArrayList<>();
keys = new ArrayList<>();
chunks = new ArrayList<>();
// Create containers
for (int x = 0; x < INIT_CONTAINERS; x++) {
long containerID = HddsUtils.getTime() + x;
ContainerCommandRequestProto req = getCreateContainerCommand(containerID);
dispatcher.dispatch(req, null);
containers.add(containerID);
containerCount.getAndIncrement();
}
for (int x = 0; x < INIT_KEYS; x++) {
keys.add(HddsUtils.getTime() + x);
}
for (int x = 0; x < INIT_CHUNKS; x++) {
chunks.add("chunk-" + x);
}
// Add chunk and keys to the containers
for (int x = 0; x < INIT_KEYS; x++) {
String chunkName = chunks.get(x);
chunkCount.getAndIncrement();
long key = keys.get(x);
keyCount.getAndIncrement();
for (int y = 0; y < INIT_CONTAINERS; y++) {
long containerID = containers.get(y);
BlockID blockID = new BlockID(containerID, key);
dispatcher.dispatch(getPutBlockCommand(blockID, chunkName), null);
dispatcher.dispatch(getWriteChunkCommand(blockID, chunkName), null);
}
}
}
19
Source : GenerateOzoneRequiredConfigurations.java
with Apache License 2.0
from apache
/**
* Generate ozone-site.xml at specified path.
* @param path
* @throws PicocliException
* @throws JAXBException
*/
public static void generateConfigurations(String path) throws PicocliException, JAXBException, IOException {
if (!isValidPath(path)) {
throw new PicocliException("Invalid directory path.");
}
if (!canWrite(path)) {
throw new PicocliException("Insufficient permission.");
}
OzoneConfiguration oc = new OzoneConfiguration();
ClassLoader cL = Thread.currentThread().getContextClassLoader();
if (cL == null) {
cL = OzoneConfiguration.class.getClassLoader();
}
URL url = cL.getResource("ozone-default.xml");
List<OzoneConfiguration.Property> allProperties = oc.readPropertyFromXml(url);
List<OzoneConfiguration.Property> requiredProperties = new ArrayList<>();
for (OzoneConfiguration.Property p : allProperties) {
if (p.getTag() != null && p.getTag().contains("REQUIRED")) {
if (p.getName().equalsIgnoreCase(OzoneConfigKeys.OZONE_METADATA_DIRS)) {
p.setValue(System.getProperty(OzoneConsts.JAVA_TMP_DIR));
} else if (p.getName().equalsIgnoreCase(OMConfigKeys.OZONE_OM_ADDRESS_KEY) || p.getName().equalsIgnoreCase(ScmConfigKeys.OZONE_SCM_NAMES) || p.getName().equalsIgnoreCase(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY)) {
p.setValue(OzoneConsts.LOCALHOST);
}
requiredProperties.add(p);
}
}
OzoneConfiguration.XMLConfiguration requiredConfig = new OzoneConfiguration.XMLConfiguration();
requiredConfig.setProperties(requiredProperties);
File output = new File(path, "ozone-site.xml");
if (output.createNewFile()) {
JAXBContext context = JAXBContext.newInstance(OzoneConfiguration.XMLConfiguration.class);
Marshaller m = context.createMarshaller();
m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
m.marshal(requiredConfig, output);
System.out.println("ozone-site.xml has been generated at " + path);
} else {
System.out.printf("ozone-site.xml already exists at %s and " + "will not be overwritten%n", path);
}
}
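A short, hypothetical caller sketch for generateConfigurations above; the output directory is a placeholder and must already exist and be writable, as the validation at the top of the method requires.
String outputDir = "/tmp/ozone-conf";
try {
  GenerateOzoneRequiredConfigurations.generateConfigurations(outputDir);
} catch (PicocliException | JAXBException | IOException e) {
  System.err.println("Could not generate ozone-site.xml: " + e.getMessage());
}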
19
Source : ContainerMapper.java
with Apache License 2.0
from apache
private static Table getMetaTable(OzoneConfiguration configuration) throws IOException {
OmMetadataManagerImpl metadataManager = new OmMetadataManagerImpl(configuration);
return metadataManager.getKeyTable();
}
19
Source : ContainerMapper.java
with Apache License 2.0
from apache
public static void main(String[] args) throws IOException {
String path = args[0];
if (path == null) {
throw new IOException("Path cannot be null");
}
OzoneConfiguration configuration = new OzoneConfiguration();
configuration.set(OZONE_OM_DB_DIRS, path);
ContainerMapper containerMapper = new ContainerMapper();
Map<Long, List<Map<Long, BlockIdDetails>>> dataMap = containerMapper.parseOmDB(configuration);
ObjectMapper mapper = new ObjectMapper();
System.out.println(mapper.writeValueAsString(dataMap));
}
19
Source : RandomKeyGenerator.java
with Apache License 2.0
from apache
/**
* Data generator tool to generate as many keys as possible.
*/
@Command(name = "randomkeys", aliases = "rk", description = "Generate volumes/buckets and put generated keys.", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true)
public final class RandomKeyGenerator implements Callable<Void> {
@ParentCommand
private Freon freon;
enum FreonOps {
VOLUME_CREATE, BUCKET_CREATE, KEY_CREATE, KEY_WRITE
}
private static final String DURATION_FORMAT = "HH:mm:ss,SSS";
private static final int QUANTILES = 10;
private static final int CHECK_INTERVAL_MILLIS = 5000;
private byte[] keyValueBuffer = null;
private static final String DIGEST_ALGORITHM = "MD5";
// A common initial MessageDigest for each key without its UUID
private MessageDigest commonInitialMD = null;
private static final Logger LOG = LoggerFactory.getLogger(RandomKeyGenerator.class);
private volatile boolean completed = false;
private volatile Throwable exception;
@Option(names = { "--num-of-threads", "--numOfThreads" }, description = "number of threads to be launched for the run. Full name " + "--numOfThreads will be removed in later versions.", defaultValue = "10")
private int numOfThreads = 10;
@Option(names = { "--num-of-volumes", "--numOfVolumes" }, description = "specifies number of Volumes to be created in offline " + "mode. Full name --numOfVolumes will be removed in later versions.", defaultValue = "10")
private int numOfVolumes = 10;
@Option(names = { "--num-of-buckets", "--numOfBuckets" }, description = "specifies number of Buckets to be created per Volume. " + "Full name --numOfBuckets will be removed in later versions.", defaultValue = "1000")
private int numOfBuckets = 1000;
@Option(names = { "--num-of-keys", "--numOfKeys" }, description = "specifies number of Keys to be created per Bucket. Full" + " name --numOfKeys will be removed in later versions.", defaultValue = "500000")
private int numOfKeys = 500000;
@Option(names = { "--key-size", "--keySize" }, description = "Specifies the size of Key in bytes to be created. Full" + " name --keySize will be removed in later versions.", defaultValue = "10240")
private long keySize = 10240;
@Option(names = { "--validate-writes", "--validateWrites" }, description = "Specifies whether to validate keys after writing. Full" + " name --validateWrites will be removed in later versions.")
private boolean validateWrites = false;
@Option(names = { "--buffer-size", "--bufferSize" }, description = "Specifies the buffer size while writing. Full name " + "--bufferSize will be removed in later versions.", defaultValue = "4096")
private int bufferSize = 4096;
@Option(names = "--json", description = "directory where json is created.")
private String jsonDir;
@Option(names = { "--replication-type", "--replicationType" }, description = "Replication type (STAND_ALONE, RATIS). Full name " + "--replicationType will be removed in later versions.", defaultValue = "STAND_ALONE")
private ReplicationType type = ReplicationType.STAND_ALONE;
@Option(names = "--factor", description = "Replication factor (ONE, THREE)", defaultValue = "ONE")
private ReplicationFactor factor = ReplicationFactor.ONE;
@Option(names = "--om-service-id", description = "OM Service ID")
private String omServiceID = null;
private int threadPoolSize;
private OzoneClient ozoneClient;
private ObjectStore objectStore;
private ExecutorService executor;
private long startTime;
private long jobStartTime;
private AtomicLong volumeCreationTime;
private AtomicLong bucketCreationTime;
private AtomicLong keyCreationTime;
private AtomicLong keyWriteTime;
private AtomicLong totalBytesWritten;
private int totalBucketCount;
private long totalKeyCount;
private AtomicInteger volumeCounter;
private AtomicInteger bucketCounter;
private AtomicLong keyCounter;
private Map<Integer, OzoneVolume> volumes;
private Map<Integer, OzoneBucket> buckets;
private AtomicInteger numberOfVolumesCreated;
private AtomicInteger numberOfBucketsCreated;
private AtomicLong numberOfKeysAdded;
private Long totalWritesValidated;
private Long writeValidationSuccessCount;
private Long writeValidationFailureCount;
private BlockingQueue<KeyValidate> validationQueue;
private ArrayList<Histogram> histograms = new ArrayList<>();
private OzoneConfiguration ozoneConfiguration;
private ProgressBar progressbar;
RandomKeyGenerator() {
}
@VisibleForTesting
RandomKeyGenerator(OzoneConfiguration ozoneConfiguration) {
this.ozoneConfiguration = ozoneConfiguration;
}
public void init(OzoneConfiguration configuration) throws IOException {
startTime = System.nanoTime();
jobStartTime = System.currentTimeMillis();
volumeCreationTime = new AtomicLong();
bucketCreationTime = new AtomicLong();
keyCreationTime = new AtomicLong();
keyWriteTime = new AtomicLong();
totalBytesWritten = new AtomicLong();
numberOfVolumesCreated = new AtomicInteger();
numberOfBucketsCreated = new AtomicInteger();
numberOfKeysAdded = new AtomicLong();
volumeCounter = new AtomicInteger();
bucketCounter = new AtomicInteger();
keyCounter = new AtomicLong();
volumes = new ConcurrentHashMap<>();
buckets = new ConcurrentHashMap<>();
if (omServiceID != null) {
ozoneClient = OzoneClientFactory.getRpcClient(omServiceID, configuration);
} else {
ozoneClient = OzoneClientFactory.getRpcClient(configuration);
}
objectStore = ozoneClient.getObjectStore();
for (FreonOps ops : FreonOps.values()) {
histograms.add(ops.ordinal(), new Histogram(new UniformReservoir()));
}
if (freon != null) {
freon.startHttpServer();
}
}
@Override
public Void call() throws Exception {
if (ozoneConfiguration != null) {
if (!ozoneConfiguration.getBoolean(HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA, HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA_DEFAULT)) {
LOG.info("Override validateWrites to false, because " + HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA + " is set to false.");
validateWrites = false;
}
init(ozoneConfiguration);
} else {
init(freon.createOzoneConfiguration());
}
keyValueBuffer = StringUtils.string2Bytes(RandomStringUtils.randomAscii(bufferSize));
// Compute the common initial digest for all keys without their UUID
if (validateWrites) {
commonInitialMD = DigestUtils.getDigest(DIGEST_ALGORITHM);
for (long nrRemaining = keySize; nrRemaining > 0; nrRemaining -= bufferSize) {
int curSize = (int) Math.min(bufferSize, nrRemaining);
commonInitialMD.update(keyValueBuffer, 0, curSize);
}
}
totalBucketCount = numOfVolumes * numOfBuckets;
totalKeyCount = totalBucketCount * numOfKeys;
LOG.info("Number of Threads: {}", numOfThreads);
threadPoolSize = numOfThreads;
executor = Executors.newFixedThreadPool(threadPoolSize);
addShutdownHook();
LOG.info("Number of Volumes: {}.", numOfVolumes);
LOG.info("Number of Buckets per Volume: {}.", numOfBuckets);
LOG.info("Number of Keys per Bucket: {}.", numOfKeys);
LOG.info("Key size: {} bytes", keySize);
LOG.info("Buffer size: {} bytes", bufferSize);
LOG.info("validateWrites : {}", validateWrites);
for (int i = 0; i < numOfThreads; i++) {
executor.execute(new ObjectCreator());
}
Thread validator = null;
if (validateWrites) {
totalWritesValidated = 0L;
writeValidationSuccessCount = 0L;
writeValidationFailureCount = 0L;
validationQueue = new LinkedBlockingQueue<>();
validator = new Thread(new Validator());
validator.start();
LOG.info("Data validation is enabled.");
}
LongSupplier currentValue = numberOfKeysAdded::get;
progressbar = new ProgressBar(System.out, totalKeyCount, currentValue);
LOG.info("Starting progress bar Thread.");
progressbar.start();
// wait until all keys are added or exception occurred.
while ((numberOfKeysAdded.get() != totalKeyCount) && exception == null) {
try {
Thread.sleep(CHECK_INTERVAL_MILLIS);
} catch (InterruptedException e) {
throw e;
}
}
executor.shutdown();
executor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS);
completed = true;
if (exception != null) {
progressbar.terminate();
} else {
progressbar.shutdown();
}
if (validator != null) {
validator.join();
}
ozoneClient.close();
if (exception != null) {
throw new RuntimeException(exception);
}
return null;
}
/**
* Adds ShutdownHook to print statistics.
*/
private void addShutdownHook() {
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
printStats(System.out);
if (freon != null) {
freon.stopHttpServer();
}
}));
}
/**
* Prints stats of {@link Freon} run to the PrintStream.
*
* @param out PrintStream
*/
private void printStats(PrintStream out) {
long endTime = System.nanoTime() - startTime;
String execTime = DurationFormatUtils.formatDuration(TimeUnit.NANOSECONDS.toMillis(endTime), DURATION_FORMAT);
long volumeTime = TimeUnit.NANOSECONDS.toMillis(volumeCreationTime.get()) / threadPoolSize;
String prettyAverageVolumeTime = DurationFormatUtils.formatDuration(volumeTime, DURATION_FORMAT);
long bucketTime = TimeUnit.NANOSECONDS.toMillis(bucketCreationTime.get()) / threadPoolSize;
String prettyAverageBucketTime = DurationFormatUtils.formatDuration(bucketTime, DURATION_FORMAT);
long averageKeyCreationTime = TimeUnit.NANOSECONDS.toMillis(keyCreationTime.get()) / threadPoolSize;
String prettyAverageKeyCreationTime = DurationFormatUtils.formatDuration(averageKeyCreationTime, DURATION_FORMAT);
long averageKeyWriteTime = TimeUnit.NANOSECONDS.toMillis(keyWriteTime.get()) / threadPoolSize;
String prettyAverageKeyWriteTime = DurationFormatUtils.formatDuration(averageKeyWriteTime, DURATION_FORMAT);
out.println();
out.println("***************************************************");
out.println("Status: " + (exception != null ? "Failed" : "Success"));
out.println("Git Base Revision: " + VersionInfo.getRevision());
out.println("Number of Volumes created: " + numberOfVolumesCreated);
out.println("Number of Buckets created: " + numberOfBucketsCreated);
out.println("Number of Keys added: " + numberOfKeysAdded);
out.println("Ratis replication factor: " + factor.name());
out.println("Ratis replication type: " + type.name());
out.println("Average Time spent in volume creation: " + prettyAverageVolumeTime);
out.println("Average Time spent in bucket creation: " + prettyAverageBucketTime);
out.println("Average Time spent in key creation: " + prettyAverageKeyCreationTime);
out.println("Average Time spent in key write: " + prettyAverageKeyWriteTime);
out.println("Total bytes written: " + totalBytesWritten);
if (validateWrites) {
out.println("Total number of writes validated: " + totalWritesValidated);
out.println("Writes validated: " + (100.0 * totalWritesValidated / numberOfKeysAdded.get()) + " %");
out.println("Successful validation: " + writeValidationSuccessCount);
out.println("Unsuccessful validation: " + writeValidationFailureCount);
}
out.println("Total Execution time: " + execTime);
out.println("***************************************************");
if (jsonDir != null) {
String[][] quantileTime = new String[FreonOps.values().length][QUANTILES + 1];
String[] deviations = new String[FreonOps.values().length];
String[] means = new String[FreonOps.values().length];
for (FreonOps ops : FreonOps.values()) {
Snapshot snapshot = histograms.get(ops.ordinal()).getSnapshot();
for (int i = 0; i <= QUANTILES; i++) {
quantileTime[ops.ordinal()][i] = DurationFormatUtils.formatDuration(TimeUnit.NANOSECONDS.toMillis((long) snapshot.getValue((1.0 / QUANTILES) * i)), DURATION_FORMAT);
}
deviations[ops.ordinal()] = DurationFormatUtils.formatDuration(TimeUnit.NANOSECONDS.toMillis((long) snapshot.getStdDev()), DURATION_FORMAT);
means[ops.ordinal()] = DurationFormatUtils.formatDuration(TimeUnit.NANOSECONDS.toMillis((long) snapshot.getMean()), DURATION_FORMAT);
}
FreonJobInfo jobInfo = new FreonJobInfo().setExecTime(execTime).setGitBaseRevision(VersionInfo.getRevision()).setMeanVolumeCreateTime(means[FreonOps.VOLUME_CREATE.ordinal()]).setDeviationVolumeCreateTime(deviations[FreonOps.VOLUME_CREATE.ordinal()]).setTenQuantileVolumeCreateTime(quantileTime[FreonOps.VOLUME_CREATE.ordinal()]).setMeanBucketCreateTime(means[FreonOps.BUCKET_CREATE.ordinal()]).setDeviationBucketCreateTime(deviations[FreonOps.BUCKET_CREATE.ordinal()]).setTenQuantileBucketCreateTime(quantileTime[FreonOps.BUCKET_CREATE.ordinal()]).setMeanKeyCreateTime(means[FreonOps.KEY_CREATE.ordinal()]).setDeviationKeyCreateTime(deviations[FreonOps.KEY_CREATE.ordinal()]).setTenQuantileKeyCreateTime(quantileTime[FreonOps.KEY_CREATE.ordinal()]).setMeanKeyWriteTime(means[FreonOps.KEY_WRITE.ordinal()]).setDeviationKeyWriteTime(deviations[FreonOps.KEY_WRITE.ordinal()]).setTenQuantileKeyWriteTime(quantileTime[FreonOps.KEY_WRITE.ordinal()]);
String jsonName = new SimpleDateFormat("yyyyMMddHHmmss").format(Time.now()) + ".json";
String jsonPath = jsonDir + "/" + jsonName;
try (FileOutputStream os = new FileOutputStream(jsonPath)) {
ObjectMapper mapper = new ObjectMapper();
mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
writer.writeValue(os, jobInfo);
} catch (FileNotFoundException e) {
out.println("Json File could not be created for the path: " + jsonPath);
out.println(e);
} catch (IOException e) {
out.println("Json object could not be created");
out.println(e);
}
}
}
/**
* Returns the number of volumes created.
*
* @return volume count.
*/
@VisibleForTesting
int getNumberOfVolumesCreated() {
return numberOfVolumesCreated.get();
}
/**
* Returns the number of buckets created.
*
* @return bucket count.
*/
@VisibleForTesting
int getNumberOfBucketsCreated() {
return numberOfBucketsCreated.get();
}
/**
* Returns the number of keys added.
*
* @return keys count.
*/
@VisibleForTesting
long getNumberOfKeysAdded() {
return numberOfKeysAdded.get();
}
/**
* Returns true if random validation of write is enabled.
*
* @return validateWrites
*/
@VisibleForTesting
boolean getValidateWrites() {
return validateWrites;
}
/**
* Returns the number of keys validated.
*
* @return validated key count.
*/
@VisibleForTesting
long getTotalKeysValidated() {
return totalWritesValidated;
}
/**
* Returns the number of successful validation.
*
* @return successful validation count.
*/
@VisibleForTesting
long getSuccessfulValidationCount() {
return writeValidationSuccessCount;
}
/**
* Returns the number of unsuccessful validation.
*
* @return unsuccessful validation count.
*/
@VisibleForTesting
long getUnsuccessfulValidationCount() {
return validateWrites ? writeValidationFailureCount : 0;
}
/**
* Wrapper to hold ozone keyValidate entry.
*/
private static class KeyValidate {
/**
* Bucket name.
*/
private OzoneBucket bucket;
/**
* Key name.
*/
private String keyName;
/**
* Digest of this key's full value.
*/
private byte[] digest;
/**
* Constructs a new ozone keyValidate.
*
* @param bucket bucket part
* @param keyName key part
* @param digest digest of this key's full value
*/
KeyValidate(OzoneBucket bucket, String keyName, byte[] digest) {
this.bucket = bucket;
this.keyName = keyName;
this.digest = digest;
}
}
private class ObjectCreator implements Runnable {
@Override
public void run() {
int v;
while ((v = volumeCounter.getAndIncrement()) < numOfVolumes) {
if (!createVolume(v)) {
return;
}
}
int b;
while ((b = bucketCounter.getAndIncrement()) < totalBucketCount) {
if (!createBucket(b)) {
return;
}
}
long k;
while ((k = keyCounter.getAndIncrement()) < totalKeyCount) {
if (!createKey(k)) {
return;
}
}
}
}
private boolean createVolume(int volumeNumber) {
String volumeName = "vol-" + volumeNumber + "-" + RandomStringUtils.randomNumeric(5);
LOG.trace("Creating volume: {}", volumeName);
try (AutoCloseable scope = TracingUtil.createActivatedSpan("createVolume")) {
long start = System.nanoTime();
objectStore.createVolume(volumeName);
long volumeCreationDuration = System.nanoTime() - start;
volumeCreationTime.getAndAdd(volumeCreationDuration);
histograms.get(FreonOps.VOLUME_CREATE.ordinal()).update(volumeCreationDuration);
numberOfVolumesCreated.getAndIncrement();
OzoneVolume volume = objectStore.getVolume(volumeName);
volumes.put(volumeNumber, volume);
return true;
} catch (Throwable e) {
exception = e;
LOG.error("Could not create volume", e);
return false;
}
}
private boolean createBucket(int globalBucketNumber) {
int volumeNumber = globalBucketNumber % numOfVolumes;
int bucketNumber = globalBucketNumber / numOfVolumes;
OzoneVolume volume = getVolume(volumeNumber);
if (volume == null) {
LOG.error("Could not find volume {}", volumeNumber);
return false;
}
String bucketName = "bucket-" + bucketNumber + "-" + RandomStringUtils.randomNumeric(5);
LOG.trace("Creating bucket: {} in volume: {}", bucketName, volume.getName());
try (AutoCloseable scope = TracingUtil.createActivatedSpan("createBucket")) {
long start = System.nanoTime();
volume.createBucket(bucketName);
long bucketCreationDuration = System.nanoTime() - start;
histograms.get(FreonOps.BUCKET_CREATE.ordinal()).update(bucketCreationDuration);
bucketCreationTime.getAndAdd(bucketCreationDuration);
numberOfBucketsCreated.getAndIncrement();
OzoneBucket bucket = volume.getBucket(bucketName);
buckets.put(globalBucketNumber, bucket);
return true;
} catch (Throwable e) {
exception = e;
LOG.error("Could not create bucket ", e);
return false;
}
}
@SuppressFBWarnings("REC_CATCH_EXCEPTION")
private boolean createKey(long globalKeyNumber) {
int globalBucketNumber = (int) (globalKeyNumber % totalBucketCount);
long keyNumber = globalKeyNumber / totalBucketCount;
OzoneBucket bucket = getBucket(globalBucketNumber);
if (bucket == null) {
LOG.error("Could not find bucket {}", globalBucketNumber);
return false;
}
String bucketName = bucket.getName();
String volumeName = bucket.getVolumeName();
String keyName = "key-" + keyNumber + "-" + RandomStringUtils.randomNumeric(5);
LOG.trace("Adding key: {} in bucket: {} of volume: {}", keyName, bucketName, volumeName);
try {
try (AutoCloseable scope = TracingUtil.createActivatedSpan("createKey")) {
long keyCreateStart = System.nanoTime();
try (OzoneOutputStream os = bucket.createKey(keyName, keySize, type, factor, new HashMap<>())) {
long keyCreationDuration = System.nanoTime() - keyCreateStart;
histograms.get(FreonOps.KEY_CREATE.ordinal()).update(keyCreationDuration);
keyCreationTime.getAndAdd(keyCreationDuration);
try (AutoCloseable writeScope = TracingUtil.createActivatedSpan("writeKeyData")) {
long keyWriteStart = System.nanoTime();
for (long nrRemaining = keySize; nrRemaining > 0; nrRemaining -= bufferSize) {
int curSize = (int) Math.min(bufferSize, nrRemaining);
os.write(keyValueBuffer, 0, curSize);
}
long keyWriteDuration = System.nanoTime() - keyWriteStart;
histograms.get(FreonOps.KEY_WRITE.ordinal()).update(keyWriteDuration);
keyWriteTime.getAndAdd(keyWriteDuration);
totalBytesWritten.getAndAdd(keySize);
numberOfKeysAdded.getAndIncrement();
}
}
}
if (validateWrites) {
MessageDigest tmpMD = (MessageDigest) commonInitialMD.clone();
boolean validate = validationQueue.offer(new KeyValidate(bucket, keyName, tmpMD.digest()));
if (validate) {
LOG.trace("Key {} is queued for validation.", keyName);
}
}
return true;
} catch (Throwable e) {
exception = e;
LOG.error("Exception while adding key: {} in bucket: {}" + " of volume: {}.", keyName, bucketName, volumeName, e);
return false;
}
}
private OzoneVolume getVolume(Integer volumeNumber) {
return waitUntilAddedToMap(volumes, volumeNumber);
}
private OzoneBucket getBucket(Integer bucketNumber) {
return waitUntilAddedToMap(buckets, bucketNumber);
}
/**
* Looks up volume or bucket from the cache. Waits for it to be created if
* needed (can happen for the last few items depending on the number of
* threads).
*
* @return may return null if this thread is interrupted, or if any other
* thread encounters an exception (and stores it to {@code exception})
*/
private <T> T waitUntilAddedToMap(Map<Integer, T> map, Integer i) {
while (exception == null && !map.containsKey(i)) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return null;
}
}
return map.get(i);
}
private final class FreonJobInfo {
private String status;
private String gitBaseRevision;
private String jobStartTime;
private int numOfVolumes;
private int numOfBuckets;
private int numOfKeys;
private int numOfThreads;
private String dataWritten;
private String execTime;
private String replicationFactor;
private String replicationType;
private long keySize;
private int bufferSize;
private String totalThroughputPerSecond;
private String meanVolumeCreateTime;
private String deviationVolumeCreateTime;
private String[] tenQuantileVolumeCreateTime;
private String meanBucketCreateTime;
private String deviationBucketCreateTime;
private String[] tenQuantileBucketCreateTime;
private String meanKeyCreateTime;
private String deviationKeyCreateTime;
private String[] tenQuantileKeyCreateTime;
private String meanKeyWriteTime;
private String deviationKeyWriteTime;
private String[] tenQuantileKeyWriteTime;
private FreonJobInfo() {
this.status = exception != null ? "Failed" : "Success";
this.numOfVolumes = RandomKeyGenerator.this.numOfVolumes;
this.numOfBuckets = RandomKeyGenerator.this.numOfBuckets;
this.numOfKeys = RandomKeyGenerator.this.numOfKeys;
this.numOfThreads = RandomKeyGenerator.this.numOfThreads;
this.keySize = RandomKeyGenerator.this.keySize;
this.bufferSize = RandomKeyGenerator.this.bufferSize;
this.jobStartTime = Time.formatTime(RandomKeyGenerator.this.jobStartTime);
this.replicationFactor = RandomKeyGenerator.this.factor.name();
this.replicationType = RandomKeyGenerator.this.type.name();
long totalBytes = (long) numOfVolumes * numOfBuckets * numOfKeys * keySize;
this.dataWritten = getInStorageUnits((double) totalBytes);
this.totalThroughputPerSecond = getInStorageUnits((totalBytes * 1.0) / TimeUnit.NANOSECONDS.toSeconds(RandomKeyGenerator.this.keyWriteTime.get() / threadPoolSize));
}
private String getInStorageUnits(Double value) {
double size;
OzoneConsts.Units unit;
if ((long) (value / OzoneConsts.TB) != 0) {
size = value / OzoneConsts.TB;
unit = OzoneConsts.Units.TB;
} else if ((long) (value / OzoneConsts.GB) != 0) {
size = value / OzoneConsts.GB;
unit = OzoneConsts.Units.GB;
} else if ((long) (value / OzoneConsts.MB) != 0) {
size = value / OzoneConsts.MB;
unit = OzoneConsts.Units.MB;
} else if ((long) (value / OzoneConsts.KB) != 0) {
size = value / OzoneConsts.KB;
unit = OzoneConsts.Units.KB;
} else {
size = value;
unit = OzoneConsts.Units.B;
}
return size + " " + unit;
}
public FreonJobInfo setGitBaseRevision(String gitBaseRevisionVal) {
gitBaseRevision = gitBaseRevisionVal;
return this;
}
public FreonJobInfo setExecTime(String execTimeVal) {
execTime = execTimeVal;
return this;
}
public FreonJobInfo setMeanKeyWriteTime(String deviationKeyWriteTimeVal) {
this.meanKeyWriteTime = deviationKeyWriteTimeVal;
return this;
}
public FreonJobInfo setDeviationKeyWriteTime(String deviationKeyWriteTimeVal) {
this.deviationKeyWriteTime = deviationKeyWriteTimeVal;
return this;
}
public FreonJobInfo setTenQuantileKeyWriteTime(String[] tenQuantileKeyWriteTimeVal) {
this.tenQuantileKeyWriteTime = tenQuantileKeyWriteTimeVal;
return this;
}
public FreonJobInfo setMeanKeyCreateTime(String deviationKeyWriteTimeVal) {
this.meanKeyCreateTime = deviationKeyWriteTimeVal;
return this;
}
public FreonJobInfo setDeviationKeyCreateTime(String deviationKeyCreateTimeVal) {
this.deviationKeyCreateTime = deviationKeyCreateTimeVal;
return this;
}
public FreonJobInfo setTenQuantileKeyCreateTime(String[] tenQuantileKeyCreateTimeVal) {
this.tenQuantileKeyCreateTime = tenQuantileKeyCreateTimeVal;
return this;
}
public FreonJobInfo setMeanBucketCreateTime(String deviationKeyWriteTimeVal) {
this.meanBucketCreateTime = deviationKeyWriteTimeVal;
return this;
}
public FreonJobInfo setDeviationBucketCreateTime(String deviationBucketCreateTimeVal) {
this.deviationBucketCreateTime = deviationBucketCreateTimeVal;
return this;
}
public FreonJobInfo setTenQuantileBucketCreateTime(String[] tenQuantileBucketCreateTimeVal) {
this.tenQuantileBucketCreateTime = tenQuantileBucketCreateTimeVal;
return this;
}
public FreonJobInfo setMeanVolumeCreateTime(String deviationKeyWriteTimeVal) {
this.meanVolumeCreateTime = deviationKeyWriteTimeVal;
return this;
}
public FreonJobInfo setDeviationVolumeCreateTime(String deviationVolumeCreateTimeVal) {
this.deviationVolumeCreateTime = deviationVolumeCreateTimeVal;
return this;
}
public FreonJobInfo setTenQuantileVolumeCreateTime(String[] tenQuantileVolumeCreateTimeVal) {
this.tenQuantileVolumeCreateTime = tenQuantileVolumeCreateTimeVal;
return this;
}
public String getJobStartTime() {
return jobStartTime;
}
public int getNumOfVolumes() {
return numOfVolumes;
}
public int getNumOfBuckets() {
return numOfBuckets;
}
public int getNumOfKeys() {
return numOfKeys;
}
public int getNumOfThreads() {
return numOfThreads;
}
public String getExecTime() {
return execTime;
}
public String getReplicationFactor() {
return replicationFactor;
}
public String getReplicationType() {
return replicationType;
}
public String getStatus() {
return status;
}
public long getKeySize() {
return keySize;
}
public int getBufferSize() {
return bufferSize;
}
public String getGitBaseRevision() {
return gitBaseRevision;
}
public String getDataWritten() {
return dataWritten;
}
public String getTotalThroughputPerSecond() {
return totalThroughputPerSecond;
}
public String getMeanVolumeCreateTime() {
return meanVolumeCreateTime;
}
public String getDeviationVolumeCreateTime() {
return deviationVolumeCreateTime;
}
public String[] getTenQuantileVolumeCreateTime() {
return tenQuantileVolumeCreateTime;
}
public String getMeanBucketCreateTime() {
return meanBucketCreateTime;
}
public String getDeviationBucketCreateTime() {
return deviationBucketCreateTime;
}
public String[] getTenQuantileBucketCreateTime() {
return tenQuantileBucketCreateTime;
}
public String getMeanKeyCreateTime() {
return meanKeyCreateTime;
}
public String getDeviationKeyCreateTime() {
return deviationKeyCreateTime;
}
public String[] getTenQuantileKeyCreateTime() {
return tenQuantileKeyCreateTime;
}
public String getMeanKeyWriteTime() {
return meanKeyWriteTime;
}
public String getDeviationKeyWriteTime() {
return deviationKeyWriteTime;
}
public String[] getTenQuantileKeyWriteTime() {
return tenQuantileKeyWriteTime;
}
}
/**
* Validates the write done in ozone cluster.
*/
private class Validator implements Runnable {
@Override
public void run() {
DigestUtils dig = new DigestUtils(DIGEST_ALGORITHM);
while (true) {
if (completed && validationQueue.isEmpty()) {
return;
}
try {
KeyValidate kv = validationQueue.poll(5, TimeUnit.SECONDS);
if (kv != null) {
OzoneInputStream is = kv.bucket.readKey(kv.keyName);
dig.getMessageDigest().reset();
byte[] curDigest = dig.digest(is);
totalWritesValidated++;
if (MessageDigest.isEqual(kv.digest, curDigest)) {
writeValidationSuccessCount++;
} else {
writeValidationFailureCount++;
LOG.warn("Data validation error for key {}/{}/{}", kv.bucket.getVolumeName(), kv.bucket, kv.keyName);
LOG.warn("Expected checksum: {}, Actual checksum: {}", kv.digest, curDigest);
}
is.close();
}
} catch (IOException ex) {
LOG.error("Exception while validating write.", ex);
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
}
}
}
@VisibleForTesting
public void setNumOfVolumes(int numOfVolumes) {
this.numOfVolumes = numOfVolumes;
}
@VisibleForTesting
public void setNumOfBuckets(int numOfBuckets) {
this.numOfBuckets = numOfBuckets;
}
@VisibleForTesting
public void setNumOfKeys(int numOfKeys) {
this.numOfKeys = numOfKeys;
}
@VisibleForTesting
public void setNumOfThreads(int numOfThreads) {
this.numOfThreads = numOfThreads;
}
@VisibleForTesting
public void setKeySize(long keySize) {
this.keySize = keySize;
}
@VisibleForTesting
public void setType(ReplicationType type) {
this.type = type;
}
@VisibleForTesting
public void setFactor(ReplicationFactor factor) {
this.factor = factor;
}
@VisibleForTesting
public void setValidateWrites(boolean validateWrites) {
this.validateWrites = validateWrites;
}
@VisibleForTesting
public int getThreadPoolSize() {
return threadPoolSize;
}
}
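The FreonJobInfo above computes the total data written as volumes x buckets x keys x keySize and then renders it in the largest fitting storage unit via getInStorageUnits. Below is a minimal, self-contained sketch of that same conversion idea; the class name, constants, and sample numbers are illustrative assumptions, not part of the Ozone source.
// Illustrative sketch only: mirrors the byte-to-unit cascade used by
// FreonJobInfo#getInStorageUnits above. All names here are local assumptions.
public final class StorageUnitsSketch {
  private static final double KB = 1024d;
  private static final double MB = KB * 1024;
  private static final double GB = MB * 1024;
  private static final double TB = GB * 1024;

  static String toReadable(double bytes) {
    // Pick the largest unit whose integer part is non-zero, like getInStorageUnits.
    if ((long) (bytes / TB) != 0) {
      return (bytes / TB) + " TB";
    } else if ((long) (bytes / GB) != 0) {
      return (bytes / GB) + " GB";
    } else if ((long) (bytes / MB) != 0) {
      return (bytes / MB) + " MB";
    } else if ((long) (bytes / KB) != 0) {
      return (bytes / KB) + " KB";
    }
    return bytes + " B";
  }

  public static void main(String[] args) {
    // e.g. 10 volumes x 10 buckets x 100 keys x 10240-byte keys ~= 97.7 MB written
    long totalBytes = 10L * 10 * 100 * 10240;
    System.out.println(toReadable(totalBytes));
  }
}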
19
Source : OzoneClientKeyValidator.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
init();
OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
rpcClient = createOzoneClient(omServiceID, ozoneConfiguration);
readReference();
timer = getMetrics().timer("key-validate");
runTests(this::validateKey);
return null;
}
19
Source : OzoneClientKeyGenerator.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
init();
OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
contentGenerator = new ContentGenerator(keySize, bufferSize);
metadata = new HashMap<>();
try (OzoneClient rpcClient = createOzoneClient(omServiceID, ozoneConfiguration)) {
ensureVolumeAndBucketExist(rpcClient, volumeName, bucketName);
bucket = rpcClient.getObjectStore().getVolume(volumeName).getBucket(bucketName);
timer = getMetrics().timer("key-create");
runTests(this::createKey);
}
return null;
}
19
Source : OmKeyGenerator.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
init();
OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
try (OzoneClient rpcClient = createOzoneClient(omServiceID, ozoneConfiguration)) {
ensureVolumeAndBucketExist(rpcClient, volumeName, bucketName);
ozoneManagerClient = createOmClient(ozoneConfiguration, omServiceID);
timer = getMetrics().timer("key-create");
runTests(this::createKey);
} finally {
if (ozoneManagerClient != null) {
ozoneManagerClient.close();
}
}
return null;
}
19
Source : OmBucketGenerator.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
init();
OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
try (OzoneClient rpcClient = createOzoneClient(omServiceID, ozoneConfiguration)) {
ensureVolumeExists(rpcClient, volumeName);
ozoneManagerClient = createOmClient(ozoneConfiguration, omServiceID);
bucketCreationTimer = getMetrics().timer("bucket-create");
runTests(this::createBucket);
} finally {
if (ozoneManagerClient != null) {
ozoneManagerClient.close();
}
}
return null;
}
19
Source : HadoopNestedDirGenerator.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
String s;
if (depth <= 0) {
s = "Invalid depth value, depth value should be greater than zero!";
print(s);
} else if (span < 0) {
s = "Invalid span value, span value should be greater or equal to zero!";
print(s);
} else {
init();
OzoneConfiguration configuration = createOzoneConfiguration();
fileSystem = FileSystem.get(URI.create(rootPath), configuration);
runTests(this::createDir);
}
return null;
}
19
Source : HadoopFsValidator.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
init();
OzoneConfiguration configuration = createOzoneConfiguration();
fileSystem = FileSystem.get(URI.create(rootPath), configuration);
Path file = new Path(rootPath + "/" + generateObjectName(0));
try (FSDataInputStream stream = fileSystem.open(file)) {
referenceDigest = getDigest(stream);
}
timer = getMetrics().timer("file-read");
runTests(this::validateFile);
return null;
}
19
Source : HadoopDirTreeGenerator.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
String s;
if (depth <= 0) {
s = "Invalid depth value, depth value should be greater than zero!";
print(s);
} else if (span <= 0) {
s = "Invalid span value, span value should be greater than zero!";
print(s);
} else {
init();
OzoneConfiguration configuration = createOzoneConfiguration();
fileSystem = FileSystem.get(URI.create(rootPath), configuration);
contentGenerator = new ContentGenerator(fileSizeInBytes, bufferSize);
timer = getMetrics().timer("file-create");
runTests(this::createDir);
}
return null;
}
19
Source : Freon.java
with Apache License 2.0
from apache
/**
* Ozone data generator and performance test tool.
*/
@Command(name = "ozone freon", description = "Load generator and tester tool for ozone", subcommands = { RandomKeyGenerator.class, OzoneClientKeyGenerator.class, OzoneClientKeyValidator.class, OmKeyGenerator.class, OmBucketGenerator.class, HadoopFsGenerator.class, HadoopNestedDirGenerator.class, HadoopDirTreeGenerator.class, HadoopFsValidator.class, SameKeyReader.class, S3KeyGenerator.class, DatanodeChunkGenerator.class, DatanodeChunkValidator.class, DatanodeBlockPutter.class, FollowerAppendLogEntryGenerator.class, ChunkManagerDiskWrite.class, LeaderAppendLogEntryGenerator.class, ClosedContainerReplicator.class }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true)
public class Freon extends GenericCli {
public static final Logger LOG = LoggerFactory.getLogger(Freon.class);
public Freon() {
super(Freon.class);
}
@Option(names = "--server", description = "Enable internal http server to provide metric " + "and profile endpoint")
private boolean httpServer = false;
private final boolean interactive = System.console() != null;
private FreonHttpServer freonHttpServer;
private OzoneConfiguration conf;
@Override
public void execute(String[] argv) {
conf = createOzoneConfiguration();
HddsServerUtil.initializeMetrics(conf, "ozone-freon");
TracingUtil.initTracing("freon", conf);
super.execute(argv);
}
public void stopHttpServer() {
if (freonHttpServer != null) {
try {
freonHttpServer.stop();
} catch (Exception e) {
LOG.error("Freon http server can't be stopped", e);
}
}
}
public void startHttpServer() {
if (httpServer) {
try {
freonHttpServer = new FreonHttpServer(conf);
freonHttpServer.start();
} catch (IOException e) {
LOG.error("Freon http server can't be started", e);
}
}
}
public static void main(String[] args) {
new Freon().run(args);
}
public boolean isInteractive() {
return interactive;
}
}
19
Source : DatanodeChunkValidator.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
init();
OzoneConfiguration ozoneConf = createOzoneConfiguration();
if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) {
throw new IllegalArgumentException("Datanode chunk validator is not supported in secure environment");
}
try (StorageContainerLocationProtocol scmLocationClient = createStorageContainerLocationClient(ozoneConf)) {
List<Pipeline> pipelines = scmLocationClient.listPipelines();
Pipeline pipeline;
if (pipelineId != null && pipelineId.length() > 0) {
pipeline = pipelines.stream().filter(p -> p.getId().toString().equals(pipelineId)).findFirst().orElseThrow(() -> new IllegalArgumentException("Pipeline ID is defined, but there is no such pipeline: " + pipelineId));
} else {
pipeline = pipelines.stream().filter(p -> p.getFactor() == HddsProtos.ReplicationFactor.THREE).findFirst().orElseThrow(() -> new IllegalArgumentException("Pipeline ID is NOT defined, and no pipeline " + "has been found with factor=THREE"));
LOG.info("Using pipeline {}", pipeline.getId());
}
try (XceiverClientManager xceiverClientManager = new XceiverClientManager(ozoneConf)) {
xceiverClientSpi = xceiverClientManager.acquireClient(pipeline);
readReference();
timer = getMetrics().timer("chunk-validate");
runTests(this::validateChunk);
}
} finally {
if (xceiverClientSpi != null) {
xceiverClientSpi.close();
}
}
return null;
}
19
Source : DatanodeChunkGenerator.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
OzoneConfiguration ozoneConf = createOzoneConfiguration();
xceiverClientManager = new XceiverClientManager(ozoneConf);
if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) {
throw new IllegalArgumentException("Datanode chunk generator is not supported in secure environment");
}
List<String> pipelinesFromCmd = Arrays.asList(pipelineIds.split(","));
List<String> datanodeHosts = Arrays.asList(this.datanodes.split(","));
Set<Pipeline> pipelines;
try (StorageContainerLocationProtocol scmLocationClient = createStorageContainerLocationClient(ozoneConf)) {
List<Pipeline> pipelinesFromSCM = scmLocationClient.listPipelines();
Pipeline firstPipeline;
init();
if (!arePipelinesOrDatanodesProvided()) {
// default behaviour if no arguments provided
firstPipeline = pipelinesFromSCM.stream().filter(p -> p.getFactor() == ReplicationFactor.THREE).findFirst().orElseThrow(() -> new IllegalArgumentException("Pipeline ID is NOT defined, and no pipeline " + "has been found with factor=THREE"));
XceiverClientSpi xceiverClientSpi = xceiverClientManager.acquireClient(firstPipeline);
xceiverClients = new ArrayList<>();
xceiverClients.add(xceiverClientSpi);
} else {
xceiverClients = new ArrayList<>();
pipelines = new HashSet<>();
for (String pipelineId : pipelinesFromCmd) {
List<Pipeline> selectedPipelines = pipelinesFromSCM.stream().filter((p -> p.getId().toString().equals("PipelineID=" + pipelineId) || pipelineContainsDatanode(p, datanodeHosts))).collect(Collectors.toList());
pipelines.addAll(selectedPipelines);
}
for (Pipeline p : pipelines) {
LOG.info("Writing to pipeline: " + p.getId());
xceiverClients.add(xceiverClientManager.acquireClient(p));
}
if (pipelines.isEmpty()) {
throw new IllegalArgumentException("Couldn't find the any/the selected pipeline");
}
}
runTest();
} finally {
for (XceiverClientSpi xceiverClientSpi : xceiverClients) {
if (xceiverClientSpi != null) {
xceiverClientSpi.close();
}
}
}
return null;
}
19
Source : DatanodeBlockPutter.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
init();
OzoneConfiguration ozoneConf = createOzoneConfiguration();
if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) {
throw new IllegalArgumentException("datanode-block-putter is not supported in secure environment");
}
try (StorageContainerLocationProtocol scmLocationClient = createStorageContainerLocationClient(ozoneConf)) {
Pipeline pipeline = findPipelineForTest(pipelineId, scmLocationClient, LOG);
try (XceiverClientManager xceiverClientManager = new XceiverClientManager(ozoneConf)) {
client = xceiverClientManager.acquireClient(pipeline);
timer = getMetrics().timer("put-block");
byte[] data = RandomStringUtils.randomAscii(chunkSize).getBytes(StandardCharsets.UTF_8);
Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
checksumProtobuf = checksum.computeChecksum(data).getProtoBufMessage();
runTests(this::putBlock);
}
} finally {
if (client != null) {
client.close();
}
}
return null;
}
19
Source : ClosedContainerReplicator.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
OzoneConfiguration conf = createOzoneConfiguration();
final Collection<String> datanodeStorageDirs = MutableVolumeSet.getDatanodeStorageDirs(conf);
for (String dir : datanodeStorageDirs) {
checkDestinationDirectory(dir);
}
// logic same as the download+import on the destination datanode
initializeReplicationSupervisor(conf);
final ContainerOperationClient containerOperationClient = new ContainerOperationClient(conf);
final List<ContainerInfo> containerInfos = containerOperationClient.listContainer(0L, 1_000_000);
replicationTasks = new ArrayList<>();
for (ContainerInfo container : containerInfos) {
final ContainerWithPipeline containerWithPipeline = containerOperationClient.getContainerWithPipeline(container.getContainerID());
if (container.getState() == LifeCycleState.CLOSED) {
final List<DatanodeDetails> datanodesWithContainer = containerWithPipeline.getPipeline().getNodes();
final List<String> datanodeUUIDs = datanodesWithContainer.stream().map(DatanodeDetails::getUuidString).collect(Collectors.toList());
// if datanode is specified, replicate only container if it has a
// replica.
if (datanode.isEmpty() || datanodeUUIDs.contains(datanode)) {
replicationTasks.add(new ReplicationTask(container.getContainerID(), datanodesWithContainer));
}
}
}
// important: override the max number of tasks.
setTestNo(replicationTasks.size());
init();
timer = getMetrics().timer("replicate-container");
runTests(this::replicateContainer);
return null;
}
19
Source : BaseFreonGenerator.java
with Apache License 2.0
from apache
protected OzoneClient createOzoneClient(String omServiceID, OzoneConfiguration conf) throws Exception {
if (omServiceID != null) {
return OzoneClientFactory.getRpcClient(omServiceID, conf);
} else {
return OzoneClientFactory.getRpcClient(conf);
}
}
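The helper above picks between a plain RPC client and an OM-HA client depending on whether an OM service ID is supplied. A hedged usage sketch of the same branch follows; the command-line handling and any service-id value are placeholders, not values from the Ozone source.
// Sketch only: drives the same choice as BaseFreonGenerator#createOzoneClient.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public final class ClientSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Optional OM service id, e.g. passed as the first argument (placeholder).
    String omServiceId = args.length > 0 ? args[0] : null;
    try (OzoneClient client = omServiceId != null
        ? OzoneClientFactory.getRpcClient(omServiceId, conf)
        : OzoneClientFactory.getRpcClient(conf)) {
      System.out.println("Connected, object store: " + client.getObjectStore());
    }
  }
}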
19
Source : BaseAppendLogGenerator.java
with Apache License 2.0
from apache
protected void setServerIdFromFile(OzoneConfiguration conf) throws IOException {
File idFile = new File(HddsServerUtil.getDatanodeIdFilePath(conf));
if ((this.serverId == null || this.serverId.equals("")) && idFile.exists()) {
DatanodeDetails datanodeDetails = DatanodeIdYaml.readDatanodeIdFile(idFile);
this.serverId = datanodeDetails.getUuidString();
}
Preconditions.assertTrue(!serverId.equals(""), "Server id is not specified and can't be read from " + idFile.getAbsolutePath());
}
19
Source : OzoneGetConf.java
with Apache License 2.0
from apache
public static void main(String[] argv) {
LogManager.resetConfiguration();
Logger.getRootLogger().setLevel(Level.INFO);
Logger.getRootLogger().addAppender(new ConsoleAppender(new PatternLayout("%m%n")));
Logger.getLogger(NativeCodeLoader.clreplaced).setLevel(Level.ERROR);
OzoneConfiguration conf = new OzoneConfiguration();
conf.addResource(new OzoneConfiguration());
new OzoneGetConf(conf).run(argv);
}
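OzoneGetConf builds its effective configuration by layering an OzoneConfiguration resource over the defaults. A small hedged sketch of loading a specific ozone-site.xml and reading one property out of it; the file path shown is a placeholder, and addResource/get are inherited from Hadoop Configuration.
// Sketch only: loads a site file into an OzoneConfiguration and reads one key.
import java.io.File;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class ReadSiteConfigSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Placeholder path to a generated ozone-site.xml.
    conf.addResource(new Path(new File("/tmp/ozone-site.xml").toURI()));
    String omAddress = conf.get("ozone.om.address", "not set");
    System.out.println("ozone.om.address = " + omAddress);
  }
}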
19
Source : TestOzoneS3Util.java
with Apache License 2.0
from apache
/**
* Class used to test OzoneS3Util.
*/
public class TestOzoneS3Util {
private OzoneConfiguration configuration;
private String serviceID = "omService";
@Before
public void setConf() {
configuration = new OzoneConfiguration();
String nodeIDs = "om1,om2,om3";
configuration.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, serviceID);
configuration.set(OMConfigKeys.OZONE_OM_NODES_KEY + "." + serviceID, nodeIDs);
configuration.setBoolean(HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, false);
}
@Test
public void testBuildServiceNameForToken() {
Collection<String> nodeIDList = OmUtils.getOMNodeIds(configuration, serviceID);
configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, serviceID, "om1"), "om1:9862");
configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, serviceID, "om2"), "om2:9862");
configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, serviceID, "om3"), "om3:9862");
String expectedOmServiceAddress = buildServiceAddress(nodeIDList);
SecurityUtil.setConfiguration(configuration);
String omserviceAddr = OzoneS3Util.buildServiceNameForToken(configuration, serviceID, nodeIDList);
Assert.assertEquals(expectedOmServiceAddress, omserviceAddr);
}
@Test
public void testBuildServiceNameForTokenIncorrectConfig() {
Collection<String> nodeIDList = OmUtils.getOMNodeIds(configuration, serviceID);
// Don't set om3 node rpc address. Here we are skipping setting of one of
// the OM address. So buildServiceNameForToken will fail.
configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, serviceID, "om1"), "om1:9862");
configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, serviceID, "om2"), "om2:9862");
SecurityUtil.setConfiguration(configuration);
try {
OzoneS3Util.buildServiceNameForToken(configuration, serviceID, nodeIDList);
fail("testBuildServiceNameForTokenIncorrectConfig failed");
} catch (IllegalArgumentException ex) {
GenericTestUtils.assertExceptionContains("Could not find rpcAddress " + "for", ex);
}
}
/**
* Build serviceName from list of node ids.
* @param nodeIDList
* @return service name for token.
*/
private String buildServiceAddress(Collection<String> nodeIDList) {
StringBuilder omServiceAddrBuilder = new StringBuilder();
int nodesLength = nodeIDList.size();
int counter = 0;
for (String nodeID : nodeIDList) {
counter++;
String addr = configuration.get(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, serviceID, nodeID));
if (counter != nodesLength) {
omServiceAddrBuilder.append(addr + ",");
} else {
omServiceAddrBuilder.append(addr);
}
}
return omServiceAddrBuilder.toString();
}
}
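The test above drives buildServiceNameForToken from an OzoneConfiguration that carries OM HA settings: a service id, its node ids, and one RPC address per node. A hedged sketch of assembling that configuration by hand; the literal key strings and host:port values are illustrative, and the source builds the suffixed keys via OmUtils.addKeySuffixes instead.
// Sketch only: hand-builds the OM HA keys the test above relies on.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class OmHaConfigSketch {
  public static OzoneConfiguration build() {
    OzoneConfiguration conf = new OzoneConfiguration();
    String serviceId = "omService";
    conf.set("ozone.om.service.ids", serviceId);
    conf.set("ozone.om.nodes." + serviceId, "om1,om2,om3");
    // One RPC address per node id (placeholder hosts and ports).
    conf.set("ozone.om.address." + serviceId + ".om1", "om1:9862");
    conf.set("ozone.om.address." + serviceId + ".om2", "om2:9862");
    conf.set("ozone.om.address." + serviceId + ".om3", "om3:9862");
    return conf;
  }
}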
19
Source : TestVirtualHostStyleFilter.java
with Apache License 2.0
from apache
/**
* This class tests virtual host style mapping conversion to path style.
*/
public class TestVirtualHostStyleFilter {
private OzoneConfiguration conf;
private String s3HttpAddr;
@Before
public void setup() {
conf = new OzoneConfiguration();
s3HttpAddr = "localhost:9878";
conf.set(S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY, s3HttpAddr);
s3HttpAddr = s3HttpAddr.substring(0, s3HttpAddr.lastIndexOf(":"));
conf.set(S3GatewayConfigKeys.OZONE_S3G_DOMAIN_NAME, s3HttpAddr);
}
/**
* Create containerRequest object.
* @return ContainerRequest
* @throws Exception
*/
public ContainerRequest createContainerRequest(String host, String path, String queryParams, boolean virtualHostStyle) throws Exception {
URI baseUri = new URI("http://" + s3HttpAddr);
URI virtualHostStyleUri;
if (path == null && queryParams == null) {
virtualHostStyleUri = new URI("http://" + s3HttpAddr);
} else if (path != null && queryParams == null) {
virtualHostStyleUri = new URI("http://" + s3HttpAddr + path);
} else if (path != null && queryParams != null) {
virtualHostStyleUri = new URI("http://" + s3HttpAddr + path + queryParams);
} else {
virtualHostStyleUri = new URI("http://" + s3HttpAddr + queryParams);
}
URI pathStyleUri;
if (queryParams == null) {
pathStyleUri = new URI("http://" + s3HttpAddr + path);
} else {
pathStyleUri = new URI("http://" + s3HttpAddr + path + queryParams);
}
String httpMethod = "DELETE";
SecurityContext securityContext = Mockito.mock(SecurityContext.class);
PropertiesDelegate propertiesDelegate = Mockito.mock(PropertiesDelegate.class);
ContainerRequest containerRequest;
if (virtualHostStyle) {
containerRequest = new ContainerRequest(baseUri, virtualHostStyleUri, httpMethod, securityContext, propertiesDelegate);
containerRequest.header(HttpHeaders.HOST, host);
} else {
containerRequest = new ContainerRequest(baseUri, pathStyleUri, httpMethod, securityContext, propertiesDelegate);
containerRequest.header(HttpHeaders.HOST, host);
}
return containerRequest;
}
@Test
public void testVirtualHostStyle() throws Exception {
VirtualHostStyleFilter virtualHostStyleFilter = new VirtualHostStyleFilter();
virtualHostStyleFilter.setConfiguration(conf);
ContainerRequest containerRequest = createContainerRequest("mybucket" + ".localhost:9878", "/myfile", null, true);
virtualHostStyleFilter.filter(containerRequest);
URI expected = new URI("http://" + s3HttpAddr + "/mybucket/myfile");
Assert.assertEquals(expected, containerRequest.getRequestUri());
}
@Test
public void testPathStyle() throws Exception {
VirtualHostStyleFilter virtualHostStyleFilter = new VirtualHostStyleFilter();
virtualHostStyleFilter.setConfiguration(conf);
ContainerRequest containerRequest = createContainerRequest(s3HttpAddr, "/mybucket/myfile", null, false);
virtualHostStyleFilter.filter(containerRequest);
URI expected = new URI("http://" + s3HttpAddr + "/mybucket/myfile");
Assert.assertEquals(expected, containerRequest.getRequestUri());
}
@Test
public void testVirtualHostStyleWithCreateBucketRequest() throws Exception {
VirtualHostStyleFilter virtualHostStyleFilter = new VirtualHostStyleFilter();
virtualHostStyleFilter.setConfiguration(conf);
ContainerRequest containerRequest = createContainerRequest("mybucket" + ".localhost:9878", null, null, true);
virtualHostStyleFilter.filter(containerRequest);
URI expected = new URI("http://" + s3HttpAddr + "/mybucket");
Assert.assertEquals(expected, containerRequest.getRequestUri());
}
@Test
public void testVirtualHostStyleWithQueryParams() throws Exception {
VirtualHostStyleFilter virtualHostStyleFilter = new VirtualHostStyleFilter();
virtualHostStyleFilter.setConfiguration(conf);
ContainerRequest containerRequest = createContainerRequest("mybucket" + ".localhost:9878", null, "?prefix=bh", true);
virtualHostStyleFilter.filter(containerRequest);
URI expected = new URI("http://" + s3HttpAddr + "/mybucket?prefix=bh");
assertTrue(expected.toString().contains(containerRequest.getRequestUri().toString()));
containerRequest = createContainerRequest("mybucket" + ".localhost:9878", null, "?prefix=bh&type=dir", true);
virtualHostStyleFilter.filter(containerRequest);
expected = new URI("http://" + s3HttpAddr + "/mybucket?prefix=bh&type=dir");
assertTrue(expected.toString().contains(containerRequest.getRequestUri().toString()));
}
@Test
public void testVirtualHostStyleWithNoMatchingDomain() throws Exception {
VirtualHostStyleFilter virtualHostStyleFilter = new VirtualHostStyleFilter();
virtualHostStyleFilter.setConfiguration(conf);
ContainerRequest containerRequest = createContainerRequest("mybucket" + ".myhost:9999", null, null, true);
try {
virtualHostStyleFilter.filter(containerRequest);
fail("testVirtualHostStyleWithNoMatchingDomain");
} catch (InvalidRequestException ex) {
GenericTestUtils.assertExceptionContains("No matching domain", ex);
}
}
@Test
public void testIncorrectVirtualHostStyle() throws Exception {
VirtualHostStyleFilter virtualHostStyleFilter = new VirtualHostStyleFilter();
virtualHostStyleFilter.setConfiguration(conf);
ContainerRequest containerRequest = createContainerRequest("mybucket" + "localhost:9878", null, null, true);
try {
virtualHostStyleFilter.filter(containerRequest);
fail("testIncorrectVirtualHostStyle failed");
} catch (InvalidRequestException ex) {
GenericTestUtils.assertExceptionContains("invalid format", ex);
}
}
}
19
Source : TestPermissionCheck.java
with Apache License 2.0
from apache
/**
* Test operation permission check result.
*/
public class TestPermissionCheck {
private OzoneConfiguration conf;
private OzoneClient client;
private ObjectStore objectStore;
private OzoneBucket bucket;
private OzoneVolume volume;
private OMException exception;
private HttpHeaders headers;
@Before
public void setup() {
conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_S3_VOLUME_NAME, OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT);
client = Mockito.mock(OzoneClient.class);
objectStore = Mockito.mock(ObjectStore.class);
bucket = Mockito.mock(OzoneBucket.class);
volume = Mockito.mock(OzoneVolume.class);
exception = new OMException("Permission Denied", OMException.ResultCodes.PERMISSION_DENIED);
Mockito.when(client.getObjectStore()).thenReturn(objectStore);
Mockito.when(client.getConfiguration()).thenReturn(conf);
headers = Mockito.mock(HttpHeaders.class);
}
/**
* Root Endpoint.
*/
@Test
public void testListS3Buckets() throws IOException {
doThrow(exception).when(objectStore).getVolume(anyString());
RootEndpoint rootEndpoint = new RootEndpoint();
rootEndpoint.setClient(client);
try {
rootEndpoint.get();
Assert.fail("Should fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof OS3Exception);
Assert.assertTrue(((OS3Exception) e).getHttpCode() == HTTP_FORBIDDEN);
}
}
/**
* Bucket Endpoint.
*/
@Test
public void testGetBucket() throws IOException {
doThrow(exception).when(objectStore).getS3Bucket(anyString());
BucketEndpoint bucketEndpoint = new BucketEndpoint();
bucketEndpoint.setClient(client);
try {
bucketEndpoint.head("bucketName");
Assert.fail("Should fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof OS3Exception);
Assert.assertTrue(((OS3Exception) e).getHttpCode() == HTTP_FORBIDDEN);
}
}
@Test
public void testCreateBucket() throws IOException {
Mockito.when(objectStore.getVolume(anyString())).thenReturn(volume);
doThrow(exception).when(objectStore).createS3Bucket(anyString());
BucketEndpoint bucketEndpoint = new BucketEndpoint();
bucketEndpoint.setClient(client);
try {
bucketEndpoint.put("bucketName", null);
Assert.fail("Should fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof OS3Exception);
Assert.assertTrue(((OS3Exception) e).getHttpCode() == HTTP_FORBIDDEN);
}
}
@Test
public void testDeleteBucket() throws IOException {
doThrow(exception).when(objectStore).deleteS3Bucket(anyString());
BucketEndpoint bucketEndpoint = new BucketEndpoint();
bucketEndpoint.setClient(client);
try {
bucketEndpoint.delete("bucketName");
Assert.fail("Should fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof OS3Exception);
Assert.assertTrue(((OS3Exception) e).getHttpCode() == HTTP_FORBIDDEN);
}
}
@Test
public void testListMultiUpload() throws IOException {
Mockito.when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
doThrow(exception).when(bucket).listMultipartUploads(anyString());
BucketEndpoint bucketEndpoint = new BucketEndpoint();
bucketEndpoint.setClient(client);
try {
bucketEndpoint.listMultipartUploads("bucketName", "prefix");
Assert.fail("Should fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof OS3Exception);
Assert.assertTrue(((OS3Exception) e).getHttpCode() == HTTP_FORBIDDEN);
}
}
@Test
public void testListKey() throws IOException {
Mockito.when(objectStore.getVolume(anyString())).thenReturn(volume);
Mockito.when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
doThrow(exception).when(bucket).listKeys(anyString());
BucketEndpoint bucketEndpoint = new BucketEndpoint();
bucketEndpoint.setClient(client);
try {
bucketEndpoint.list("bucketName", null, null, null, 1000, null, null, null, null, null, null);
Assert.fail("Should fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof OS3Exception);
Assert.assertTrue(((OS3Exception) e).getHttpCode() == HTTP_FORBIDDEN);
}
}
@Test
public void testDeleteKeys() throws IOException, OS3Exception {
Mockito.when(objectStore.getVolume(anyString())).thenReturn(volume);
Mockito.when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
doThrow(exception).when(bucket).deleteKey(any());
BucketEndpoint bucketEndpoint = new BucketEndpoint();
bucketEndpoint.setClient(client);
MultiDeleteRequest request = new MultiDeleteRequest();
List<MultiDeleteRequest.DeleteObject> objectList = new ArrayList<>();
objectList.add(new MultiDeleteRequest.DeleteObject("deleteKeyName"));
request.setQuiet(false);
request.setObjects(objectList);
MultiDeleteResponse response = bucketEndpoint.multiDelete("BucketName", "keyName", request);
Assert.assertTrue(response.getErrors().size() == 1);
Assert.assertTrue(response.getErrors().get(0).getCode().equals("PermissionDenied"));
}
/**
* Object Endpoint.
*/
@Test
public void testGetKey() throws IOException {
Mockito.when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
doThrow(exception).when(bucket).getKey(anyString());
ObjectEndpoint objectEndpoint = new ObjectEndpoint();
objectEndpoint.setClient(client);
objectEndpoint.setHeaders(headers);
try {
objectEndpoint.get("bucketName", "keyPath", null, 1000, "marker", null);
Assert.fail("Should fail");
} catch (Exception e) {
e.printStackTrace();
Assert.assertTrue(e instanceof OS3Exception);
Assert.assertTrue(((OS3Exception) e).getHttpCode() == HTTP_FORBIDDEN);
}
}
@Test
public void testPutKey() throws IOException {
Mockito.when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
doThrow(exception).when(bucket).createKey(anyString(), anyLong(), any(), any(), any());
ObjectEndpoint objectEndpoint = new ObjectEndpoint();
objectEndpoint.setClient(client);
objectEndpoint.setHeaders(headers);
try {
objectEndpoint.put("bucketName", "keyPath", 1024, 0, null, null);
Assert.fail("Should fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof OS3Exception);
Assert.assertTrue(((OS3Exception) e).getHttpCode() == HTTP_FORBIDDEN);
}
}
@Test
public void testDeleteKey() throws IOException {
Mockito.when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
doThrow(exception).when(bucket).deleteKey(anyString());
ObjectEndpoint objectEndpoint = new ObjectEndpoint();
objectEndpoint.setClient(client);
objectEndpoint.setHeaders(headers);
try {
objectEndpoint.delete("bucketName", "keyPath", null);
Assert.fail("Should fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof OS3Exception);
Assert.assertTrue(((OS3Exception) e).getHttpCode() == HTTP_FORBIDDEN);
}
}
@Test
public void testMultiUploadKey() throws IOException {
Mockito.when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
doThrow(exception).when(bucket).initiateMultipartUpload(anyString(), any(), any());
ObjectEndpoint objectEndpoint = new ObjectEndpoint();
objectEndpoint.setClient(client);
objectEndpoint.setHeaders(headers);
try {
objectEndpoint.initializeMultipartUpload("bucketName", "keyPath");
Assert.fail("Should fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof OS3Exception);
Assert.assertTrue(((OS3Exception) e).getHttpCode() == HTTP_FORBIDDEN);
}
}
}
19
Source : ObjectStoreStub.java
with Apache License 2.0
from apache
/**
* ObjectStore implementation with in-memory state.
*/
public class ObjectStoreStub extends ObjectStore {
public ObjectStoreStub() {
super();
}
private Map<String, OzoneVolumeStub> volumes = new HashMap<>();
private Map<String, Boolean> bucketEmptyStatus = new HashMap<>();
private static OzoneConfiguration conf = new OzoneConfiguration();
@Override
public void createVolume(String volumeName) throws IOException {
createVolume(volumeName, VolumeArgs.newBuilder().setAdmin("root").setOwner("root").setQuotaInBytes(Integer.MAX_VALUE).setAcls(new ArrayList<>()).build());
}
@Override
public void createVolume(String volumeName, VolumeArgs volumeArgs) {
OzoneVolumeStub volume = new OzoneVolumeStub(volumeName, volumeArgs.getAdmin(), volumeArgs.getOwner(), volumeArgs.getQuotaInBytes(), volumeArgs.getQuotaInNamespace(), Time.now(), volumeArgs.getAcls());
volumes.put(volumeName, volume);
}
@Override
public OzoneVolume getVolume(String volumeName) throws IOException {
if (volumes.containsKey(volumeName)) {
return volumes.get(volumeName);
} else {
throw new OMException("", VOLUME_NOT_FOUND);
}
}
@Override
public Iterator<? extends OzoneVolume> listVolumes(String volumePrefix) throws IOException {
return volumes.values().stream().filter(volume -> volume.getName().startsWith(volumePrefix)).collect(Collectors.toList()).iterator();
}
@Override
public Iterator<? extends OzoneVolume> listVolumes(String volumePrefix, String prevVolume) throws IOException {
return volumes.values().stream().filter(volume -> volume.getName().compareTo(prevVolume) > 0).filter(volume -> volume.getName().startsWith(volumePrefix)).collect(Collectors.toList()).iterator();
}
@Override
public Iterator<? extends OzoneVolume> listVolumesByUser(String user, String volumePrefix, String prevVolume) throws IOException {
return volumes.values().stream().filter(volume -> volume.getOwner().equals(user)).filter(volume -> volume.getName().compareTo(prevVolume) < 0).filter(volume -> volume.getName().startsWith(volumePrefix)).collect(Collectors.toList()).iterator();
}
@Override
public void deleteVolume(String volumeName) throws IOException {
volumes.remove(volumeName);
}
@Override
public void createS3Bucket(String s3BucketName) throws IOException {
if (!bucketEmptyStatus.containsKey(s3BucketName)) {
String volumeName = HddsClientUtils.getS3VolumeName(conf);
bucketEmptyStatus.put(s3BucketName, true);
if (!volumes.containsKey(volumeName)) {
createVolume(volumeName);
}
volumes.get(volumeName).createBucket(s3BucketName);
} else {
throw new OMException("", BUCKET_ALREADY_EXISTS);
}
}
@Override
public void deleteS3Bucket(String s3BucketName) throws IOException {
if (bucketEmptyStatus.containsKey(s3BucketName)) {
if (bucketEmptyStatus.get(s3BucketName)) {
bucketEmptyStatus.remove(s3BucketName);
} else {
throw new OMException("", BUCKET_NOT_EMPTY);
}
} else {
throw new OMException("", BUCKET_NOT_FOUND);
}
}
public void setBucketEmptyStatus(String bucketName, boolean status) {
bucketEmptyStatus.computeIfPresent(bucketName, (k, v) -> status);
}
}
19
Source : VirtualHostStyleFilter.java
with Apache License 2.0
from apache
/**
* Filter used to convert virtual host style pattern to path style pattern.
*/
@Provider
@PreMatching
@Priority(VirtualHostStyleFilter.PRIORITY)
public class VirtualHostStyleFilter implements ContainerRequestFilter {
public static final int PRIORITY = 100;
private static final Logger LOG = LoggerFactory.getLogger(VirtualHostStyleFilter.class);
@Inject
private OzoneConfiguration conf;
private String[] domains;
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
domains = conf.getTrimmedStrings(OZONE_S3G_DOMAIN_NAME);
if (domains.length == 0) {
// domains is not configured, might be it is path style.
// So, do not continue further, just return.
return;
}
// Get the value of the host
String host = requestContext.getHeaderString(HttpHeaders.HOST);
host = checkHostWithoutPort(host);
String domain = getDomainName(host);
if (domain == null) {
throw getException("Invalid S3 Gateway request {" + requestContext.getUriInfo().getRequestUri().toString() + " }: No matching domain " + "{" + Arrays.toString(domains) + "} for the host {" + host + "}");
}
LOG.debug("Http header host name is {}", host);
LOG.debug("Domain name matched is {}", domain);
// Check if we have a Virtual Host style request, host length greater than
// address length means it is virtual host style, we need to convert to
// path style.
if (host.length() > domain.length()) {
String bucketName = host.substring(0, host.length() - domain.length());
if (!bucketName.endsWith(".")) {
// Checking this as the virtual host style pattern is http://bucket.host/
throw getException("Invalid S3 Gateway request {" + requestContext.getUriInfo().getRequestUri().toString() + "}:" + " Host: {" + host + " is in invalid format");
} else {
bucketName = bucketName.substring(0, bucketName.length() - 1);
}
LOG.debug("Bucket name is {}", bucketName);
URI baseURI = requestContext.getUriInfo().getBaseUri();
String currentPath = requestContext.getUriInfo().getPath();
String newPath = bucketName;
if (currentPath != null) {
newPath += String.format("%s", currentPath);
}
MultivaluedMap<String, String> queryParams = requestContext.getUriInfo().getQueryParameters();
UriBuilder requestAddrBuilder = UriBuilder.fromUri(baseURI).path(newPath);
queryParams.forEach((k, v) -> requestAddrBuilder.queryParam(k, v.toArray()));
URI requestAddr = requestAddrBuilder.build();
requestContext.setRequestUri(baseURI, requestAddr);
}
}
private InvalidRequestException getException(String message) {
return new InvalidRequestException(message);
}
@VisibleForTesting
public void setConfiguration(OzoneConfiguration config) {
this.conf = config;
}
/**
* This method finds the longest match with the domain name.
* @param host
* @return domain name matched with the host. if none of them are matching,
* return null.
*/
private String getDomainName(String host) {
String match = null;
int length = 0;
for (String domainVal : domains) {
if (host.endsWith(domainVal)) {
int len = domainVal.length();
if (len > length) {
length = len;
match = domainVal;
}
}
}
return match;
}
private String checkHostWithoutPort(String host) {
if (host.contains(":")) {
return host.substring(0, host.lastIndexOf(":"));
} else {
return host;
}
}
}
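The filter reads its candidate domains with getTrimmedStrings, so several domains can be listed as one comma-separated value and the longest matching suffix wins. A hedged sketch of that configuration step; the domain values are placeholders, and the key constant is assumed to live in org.apache.hadoop.ozone.s3.S3GatewayConfigKeys as referenced by the tests above.
// Sketch only: sets the domain list consumed by VirtualHostStyleFilter.
import java.util.Arrays;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.s3.S3GatewayConfigKeys;

public final class S3DomainConfigSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Multiple domains can be given as a comma-separated list (placeholder values).
    conf.set(S3GatewayConfigKeys.OZONE_S3G_DOMAIN_NAME, "s3g.internal, localhost");
    // getTrimmedStrings (inherited from Configuration) splits and trims the entries.
    String[] domains = conf.getTrimmedStrings(S3GatewayConfigKeys.OZONE_S3G_DOMAIN_NAME);
    System.out.println(Arrays.toString(domains));
  }
}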
19
Source : VirtualHostStyleFilter.java
with Apache License 2.0
from apache
@VisibleForTesting
public void setConfiguration(OzoneConfiguration config) {
this.conf = config;
}
19
Source : OzoneServiceProvider.java
with Apache License 2.0
from apache
/**
* This class creates the OM service.
*/
@ApplicationScoped
public class OzoneServiceProvider {
private Text omServiceAddr;
private String omserviceID;
@Inject
private OzoneConfiguration conf;
@PostConstruct
public void init() {
Collection<String> serviceIdList = conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY);
if (serviceIdList.size() == 0) {
// Non-HA cluster
omServiceAddr = SecurityUtil.buildTokenService(OmUtils.getOmAddressForClients(conf));
} else {
// HA cluster.
// For now if multiple service id's are configured we throw exception.
// As if multiple service id's are configured, S3Gateway will not be
// knowing which one to talk to. In future, if OM federation is supported
// we can resolve this by having another property like
// ozone.om.internal.service.id.
// TODO: Revisit this later.
if (serviceIdList.size() > 1) {
throw new IllegalArgumentException("Multiple serviceIds are " + "configured. " + Arrays.toString(serviceIdList.toArray()));
} else {
String serviceId = serviceIdList.iterator().next();
Collection<String> omNodeIds = OmUtils.getOMNodeIds(conf, serviceId);
if (omNodeIds.size() == 0) {
throw new IllegalArgumentException(OZONE_OM_NODES_KEY + "." + serviceId + " is not defined");
}
omServiceAddr = new Text(OzoneS3Util.buildServiceNameForToken(conf, serviceId, omNodeIds));
omserviceID = serviceId;
}
}
}
@Produces
public Text getService() {
return omServiceAddr;
}
@Produces
public String getOmServiceID() {
return omserviceID;
}
}
19
Source : OzoneConfigurationHolder.java
with Apache License 2.0
from apache
/**
* Ozone Configuration factory.
* <p>
* As the OzoneConfiguration is created by the CLI application here we inject
* it via a singleton instance to the Jax-RS/CDI instances.
*/
public class OzoneConfigurationHolder {
private static OzoneConfiguration configuration;
@Produces
public OzoneConfiguration configuration() {
return configuration;
}
public static void setConfiguration(OzoneConfiguration conf) {
OzoneConfigurationHolder.configuration = conf;
}
}
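Because the holder is a plain static field behind a CDI @Produces method, the CLI only needs to publish its configuration once before the servlet/CDI layer starts, and injected beans then receive it. A hedged sketch of that handoff; the startHttpLayer call is a hypothetical placeholder for the real server bootstrap (see the Gateway example further below for the actual pattern).
// Sketch only: publish the CLI-built configuration for CDI-injected beans.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.s3.OzoneConfigurationHolder;

public final class GatewayBootstrapSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    OzoneConfigurationHolder.setConfiguration(conf);
    // startHttpLayer(conf);  // hypothetical: start the S3 gateway HTTP server here
  }
}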
19
Source : OzoneConfigurationHolder.java
with Apache License 2.0
from apache
public static void setConfiguration(OzoneConfiguration conf) {
OzoneConfigurationHolder.configuration = conf;
}
19
Source : OzoneClientProducer.java
with Apache License 2.0
from apache
/**
* This class creates the OzoneClient for the Rest endpoints.
*/
@RequestScoped
public class OzoneClientProducer {
private static final Logger LOG = LoggerFactory.getLogger(OzoneClientProducer.class);
private OzoneClient client;
@Inject
private SignatureProcessor signatureParser;
@Inject
private OzoneConfiguration ozoneConfiguration;
@Inject
private Text omService;
@Inject
private String omServiceID;
@Produces
public OzoneClient createClient() throws OS3Exception, IOException {
client = getClient(ozoneConfiguration);
return client;
}
@PreDestroy
public void destroy() throws IOException {
client.close();
}
private OzoneClient getClient(OzoneConfiguration config) throws OS3Exception {
OzoneClient ozoneClient = null;
try {
// Check if any error occurred during creation of signatureProcessor.
if (signatureParser.getException() != null) {
throw signatureParser.getException();
}
String awsAccessId = signatureParser.getAwsAccessId();
validateAccessId(awsAccessId);
UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(awsAccessId);
if (OzoneSecurityUtil.isSecurityEnabled(config)) {
LOG.debug("Creating s3 auth info for client.");
try {
OzoneTokenIdentifier identifier = new OzoneTokenIdentifier();
identifier.setTokenType(S3AUTHINFO);
identifier.setStrToSign(signatureParser.getStringToSign());
identifier.setSignature(signatureParser.getSignature());
identifier.setAwsAccessId(awsAccessId);
identifier.setOwner(new Text(awsAccessId));
if (LOG.isTraceEnabled()) {
LOG.trace("Adding token for service:{}", omService);
}
Token<OzoneTokenIdentifier> token = new Token(identifier.getBytes(), identifier.getSignature().getBytes(UTF_8), identifier.getKind(), omService);
remoteUser.addToken(token);
} catch (OS3Exception | URISyntaxException ex) {
throw S3_AUTHINFO_CREATION_ERROR;
}
}
ozoneClient = remoteUser.doAs((PrivilegedExceptionAction<OzoneClient>) () -> {
if (omServiceID == null) {
return OzoneClientFactory.getRpcClient(ozoneConfiguration);
} else {
// As in HA case, we need to pass om service ID.
return OzoneClientFactory.getRpcClient(omServiceID, ozoneConfiguration);
}
});
} catch (OS3Exception ex) {
if (LOG.isDebugEnabled()) {
LOG.debug("Error during Client Creation: ", ex);
}
throw ex;
} catch (Throwable t) {
// For any other critical errors during object creation throw Internal
// error.
if (LOG.isDebugEnabled()) {
LOG.debug("Error during Client Creation: ", t);
}
throw INTERNAL_ERROR;
}
return ozoneClient;
}
// ONLY validate aws access id when needed.
private void validateAccessId(String awsAccessId) throws Exception {
if (awsAccessId == null || awsAccessId.equals("")) {
LOG.error("Malformed s3 header. awsAccessID: ", awsAccessId);
throw MALFORMED_HEADER;
}
}
public void setOzoneConfiguration(OzoneConfiguration config) {
this.ozoneConfiguration = config;
}
@VisibleForTesting
public void setSignatureParser(SignatureProcessor signatureParser) {
this.signatureParser = signatureParser;
}
}
19
Source : OzoneClientProducer.java
with Apache License 2.0
from apache
public void setOzoneConfiguration(OzoneConfiguration config) {
this.ozoneConfiguration = config;
}
19
Source : Gateway.java
with Apache License 2.0
from apache
@Override
public Void call() throws Exception {
OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
TracingUtil.initTracing("S3gateway", ozoneConfiguration);
OzoneConfigurationHolder.setConfiguration(ozoneConfiguration);
UserGroupInformation.setConfiguration(ozoneConfiguration);
httpServer = new S3GatewayHttpServer(ozoneConfiguration, "s3gateway");
start();
return null;
}
19
Source : ObjectEndpoint.java
with Apache License 2.0
from apache
/**
* Key level rest endpoints.
*/
@Path("/{bucket}/{path:.+}")
public class ObjectEndpoint extends EndpointBase {
private static final Logger LOG = LoggerFactory.getLogger(ObjectEndpoint.class);
@Context
private HttpHeaders headers;
private List<String> customizableGetHeaders = new ArrayList<>();
private int bufferSize;
public ObjectEndpoint() {
customizableGetHeaders.add("Content-Type");
customizableGetHeaders.add("Content-Language");
customizableGetHeaders.add("Expires");
customizableGetHeaders.add("Cache-Control");
customizableGetHeaders.add("Content-Disposition");
customizableGetHeaders.add("Content-Encoding");
}
@Inject
private OzoneConfiguration ozoneConfiguration;
@PostConstruct
public void init() {
bufferSize = (int) ozoneConfiguration.getStorageSize(OZONE_S3G_CLIENT_BUFFER_SIZE_KEY, OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT, StorageUnit.BYTES);
}
/**
* Rest endpoint to upload object to a bucket.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
* more details.
*/
@PUT
public Response put(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath, @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, InputStream body) throws IOException, OS3Exception {
OzoneOutputStream output = null;
if (uploadID != null && !uploadID.equals("")) {
// If uploadID is specified, it is a request for upload part
return createMultipartKey(bucketName, keyPath, length, partNumber, uploadID, body);
}
try {
String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
S3StorageType s3StorageType;
boolean storageTypeDefault;
if (storageType == null || storageType.equals("")) {
s3StorageType = S3StorageType.getDefault();
storageTypeDefault = true;
} else {
s3StorageType = toS3StorageType(storageType);
storageTypeDefault = false;
}
ReplicationType replicationType = s3StorageType.getType();
ReplicationFactor replicationFactor = s3StorageType.getFactor();
if (copyHeader != null) {
// Copy object, as copy source available.
CopyObjectResponse copyObjectResponse = copyObject(copyHeader, bucketName, keyPath, replicationType, replicationFactor, storageTypeDefault);
return Response.status(Status.OK).entity(copyObjectResponse).header("Connection", "close").build();
}
// Normal put object
OzoneBucket bucket = getBucket(bucketName);
output = bucket.createKey(keyPath, length, replicationType, replicationFactor, new HashMap<>());
if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
body = new SignedChunksInputStream(body);
}
IOUtils.copy(body, output);
return Response.ok().status(HttpStatus.SC_OK).build();
} catch (IOException ex) {
LOG.error("Exception occurred in PutObject", ex);
if (ex instanceof OMException) {
if (((OMException) ex).getResult() == ResultCodes.NOT_A_FILE) {
OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, keyPath);
os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the PutObject/MPU PartUpload operation: " + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" + " considered as Unix Paths. Path has Violated FS Semantics " + "which caused put operation to fail.");
throw os3Exception;
} else if ((((OMException) ex).getResult() == ResultCodes.PERMISSION_DENIED)) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
}
}
throw ex;
} finally {
if (output != null) {
output.close();
}
}
}
/**
* Rest endpoint to download object from a bucket, if query param uploadId
* is specified, request for list parts of a multipart upload key with
* specific uploadId.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
* https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html
* for more details.
*/
@GET
public Response get(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath, @QueryParam("uploadId") String uploadId, @QueryParam("max-parts") @DefaultValue("1000") int maxParts, @QueryParam("part-number-marker") String partNumberMarker, InputStream body) throws IOException, OS3Exception {
try {
if (uploadId != null) {
// When we have uploadId, this is the request for list Parts.
int partMarker = parsePartNumberMarker(partNumberMarker);
return listParts(bucketName, keyPath, uploadId, partMarker, maxParts);
}
OzoneBucket bucket = getBucket(bucketName);
OzoneKeyDetails keyDetails = bucket.getKey(keyPath);
long length = keyDetails.getDataSize();
LOG.debug("Data length of the key {} is {}", keyPath, length);
String rangeHeaderVal = headers.getHeaderString(RANGE_HEADER);
RangeHeader rangeHeader = null;
LOG.debug("range Header provided value: {}", rangeHeaderVal);
if (rangeHeaderVal != null) {
rangeHeader = RangeHeaderParserUtil.parseRangeHeader(rangeHeaderVal, length);
LOG.debug("range Header provided: {}", rangeHeader);
if (rangeHeader.isInValidRange()) {
throw S3ErrorTable.newError(S3ErrorTable.INVALID_RANGE, rangeHeaderVal);
}
}
ResponseBuilder responseBuilder;
if (rangeHeaderVal == null || rangeHeader.isReadFull()) {
StreamingOutput output = dest -> {
try (OzoneInputStream key = bucket.readKey(keyPath)) {
IOUtils.copy(key, dest);
}
};
responseBuilder = Response.ok(output).header(CONTENT_LENGTH, keyDetails.getDataSize());
} else {
OzoneInputStream key = bucket.readKey(keyPath);
long startOffset = rangeHeader.getStartOffset();
long endOffset = rangeHeader.getEndOffset();
// eg. if range header is given as bytes=0-0, then we should return 1
// byte from start offset
long copyLength = endOffset - startOffset + 1;
StreamingOutput output = dest -> {
try (S3WrapperInputStream s3WrapperInputStream = new S3WrapperInputStream(key.getInputStream())) {
s3WrapperInputStream.seek(startOffset);
IOUtils.copyLarge(s3WrapperInputStream, dest, 0, copyLength, new byte[bufferSize]);
}
};
responseBuilder = Response.ok(output).header(CONTENT_LENGTH, copyLength);
String contentRangeVal = RANGE_HEADER_SUPPORTED_UNIT + " " + rangeHeader.getStartOffset() + "-" + rangeHeader.getEndOffset() + "/" + length;
responseBuilder.header(CONTENT_RANGE_HEADER, contentRangeVal);
}
responseBuilder.header(ACCEPT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT);
for (String responseHeader : customizableGetHeaders) {
String headerValue = headers.getHeaderString(responseHeader);
if (headerValue != null) {
responseBuilder.header(responseHeader, headerValue);
}
}
addLastModifiedDate(responseBuilder, keyDetails);
return responseBuilder.build();
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, keyPath);
} else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
} else {
throw ex;
}
}
}
private void addLastModifiedDate(ResponseBuilder responseBuilder, OzoneKeyDetails key) {
ZonedDateTime lastModificationTime = key.getModificationTime().atZone(ZoneId.of("GMT"));
responseBuilder.header(LAST_MODIFIED, RFC1123Util.FORMAT.format(lastModificationTime));
}
/**
* Rest endpoint to check existence of an object in a bucket.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
* for more details.
*/
@HEAD
public Response head(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath) throws IOException, OS3Exception {
OzoneKeyDetails key;
try {
key = getBucket(bucketName).getKey(keyPath);
// TODO: return the specified range bytes of this object.
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
// Just return 404 with no content
return Response.status(Status.NOT_FOUND).build();
} else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
} else {
throw ex;
}
}
ResponseBuilder response = Response.ok().status(HttpStatus.SC_OK).header("ETag", "" + key.getModificationTime()).header("Content-Length", key.getDataSize()).header("Content-Type", "binary/octet-stream");
addLastModifiedDate(response, key);
return response.build();
}
/**
* Abort multipart upload request.
* @param bucket
* @param key
* @param uploadId
* @return Response
* @throws IOException
* @throws OS3Exception
*/
private Response abortMultipartUpload(String bucket, String key, String uploadId) throws IOException, OS3Exception {
try {
OzoneBucket ozoneBucket = getBucket(bucket);
ozoneBucket.abortMultipartUpload(key, uploadId);
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId);
}
throw ex;
}
return Response.status(Status.NO_CONTENT).build();
}
/**
* Delete a specific object from a bucket, if query param uploadId is
* specified, this request is for abort multipart upload.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
* https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadAbort.html
* for more details.
*/
@DELETE
@SuppressWarnings("emptyblock")
public Response delete(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath, @QueryParam("uploadId") @DefaultValue("") String uploadId) throws IOException, OS3Exception {
try {
if (uploadId != null && !uploadId.equals("")) {
return abortMultipartUpload(bucketName, keyPath, uploadId);
}
OzoneBucket bucket = getBucket(bucketName);
bucket.getKey(keyPath);
bucket.deleteKey(keyPath);
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
} else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
// NOT_FOUND is not a problem, AWS doesn't throw exception for missing
// keys. Just return 204
} else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
} else {
throw ex;
}
}
return Response.status(Status.NO_CONTENT).build();
}
/**
* Initialize MultiPartUpload request.
* <p>
* Note: the specific content type is set by the HeaderPreprocessor.
*/
@POST
@Produces(MediaType.APPLICATION_XML)
@Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER)
public Response initializeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key) throws IOException, OS3Exception {
try {
OzoneBucket ozoneBucket = getBucket(bucket);
String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
S3StorageType s3StorageType;
if (storageType == null || storageType.equals("")) {
s3StorageType = S3StorageType.getDefault();
} else {
s3StorageType = toS3StorageType(storageType);
}
ReplicationType replicationType = s3StorageType.getType();
ReplicationFactor replicationFactor = s3StorageType.getFactor();
OmMultipartInfo multipartInfo = ozoneBucket.initiateMultipartUpload(key, replicationType, replicationFactor);
MultipartUploadInitiateResponse multipartUploadInitiateResponse = new MultipartUploadInitiateResponse();
multipartUploadInitiateResponse.setBucket(bucket);
multipartUploadInitiateResponse.setKey(key);
multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID());
return Response.status(Status.OK).entity(multipartUploadInitiateResponse).build();
} catch (OMException ex) {
LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + "key: {}", bucket, key, ex);
if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, key);
}
throw ex;
}
}
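// Illustration only (not part of the original class): initiating an upload is a POST to
// /{bucket}/{key}?uploads, optionally carrying the x-amz-storage-class header that the
// endpoint above maps to a replication type and factor. The response body is the
// InitiateMultipartUploadResult XML containing the UploadId. Host, bucket and key are
// placeholders; the gateway's HeaderPreprocessor is assumed to recognize the ?uploads marker.
private static void initiateMultipartUploadExample() throws java.io.IOException {
    java.net.URL url = new java.net.URL("http://localhost:9878/bucket1/big.bin?uploads");
    java.net.HttpURLConnection conn = (java.net.HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    conn.setRequestProperty("x-amz-storage-class", "STANDARD"); // optional; default used when absent
    System.out.println("status=" + conn.getResponseCode());     // 200 on success
}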
/**
* Complete a multipart upload.
*/
@POST
@Produces(MediaType.APPLICATION_XML)
public Response completeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key, @QueryParam("uploadId") @DefaultValue("") String uploadID, CompleteMultipartUploadRequest multipartUploadRequest) throws IOException, OS3Exception {
OzoneBucket ozoneBucket = getBucket(bucket);
// Using LinkedHashMap to preserve ordering of parts list.
Map<Integer, String> partsMap = new LinkedHashMap<>();
List<CompleteMultipartUploadRequest.Part> partList = multipartUploadRequest.getPartList();
OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
try {
for (CompleteMultipartUploadRequest.Part part : partList) {
partsMap.put(part.getPartNumber(), part.geteTag());
}
if (LOG.isDebugEnabled()) {
LOG.debug("Parts map {}", partsMap);
}
omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(key, uploadID, partsMap);
CompleteMultipartUploadResponse completeMultipartUploadResponse = new CompleteMultipartUploadResponse();
completeMultipartUploadResponse.setBucket(bucket);
completeMultipartUploadResponse.setKey(key);
completeMultipartUploadResponse.setETag(omMultipartUploadCompleteInfo.getHash());
// Location is also set to the bucket name.
completeMultipartUploadResponse.setLocation(bucket);
return Response.status(Status.OK).entity(completeMultipartUploadResponse).build();
} catch (OMException ex) {
LOG.error("Error in Complete Multipart Upload Request for bucket: {}, " + ", key: {}", bucket, key, ex);
if (ex.getResult() == ResultCodes.INVALID_PART) {
throw S3ErrorTable.newError(S3ErrorTable.INVALID_PART, key);
} else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) {
throw S3ErrorTable.newError(S3ErrorTable.INVALID_PART_ORDER, key);
} else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
throw S3ErrorTable.newError(NO_SUCH_UPLOAD, uploadID);
} else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) {
throw S3ErrorTable.newError(ENTITY_TOO_SMALL, key);
} else if (ex.getResult() == ResultCodes.INVALID_REQUEST) {
OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key);
os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: You must " + "specify at least one part");
throw os3Exception;
} else if (ex.getResult() == ResultCodes.NOT_A_FILE) {
OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key);
os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: " + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are " + "considered as Unix Paths. A directory already exists with a " + "given KeyName caused failure for MPU");
throw os3Exception;
}
throw ex;
}
}
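// Illustration only (not part of the original class): the request body consumed above is
// the standard S3 CompleteMultipartUpload XML, listing each part number together with the
// ETag returned when that part was uploaded. A minimal sketch of the body shape (the ETag
// values are placeholders; depending on the unmarshaller the S3 namespace attribute may
// also be required on the root element).
private static final String COMPLETE_MPU_BODY_EXAMPLE =
    "<CompleteMultipartUpload>"
        + "<Part><PartNumber>1</PartNumber><ETag>part-1-etag</ETag></Part>"
        + "<Part><PartNumber>2</PartNumber><ETag>part-2-etag</ETag></Part>"
        + "</CompleteMultipartUpload>";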
private Response createMultipartKey(String bucket, String key, long length, int partNumber, String uploadID, InputStream body) throws IOException, OS3Exception {
try {
OzoneBucket ozoneBucket = getBucket(bucket);
String copyHeader;
OzoneOutputStream ozoneOutputStream = null;
if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
body = new SignedChunksInputStream(body);
}
try {
ozoneOutputStream = ozoneBucket.createMultipartKey(key, length, partNumber, uploadID);
copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
if (copyHeader != null) {
Pair<String, String> result = parseSourceHeader(copyHeader);
String sourceBucket = result.getLeft();
String sourceKey = result.getRight();
Long sourceKeyModificationTime = getBucket(sourceBucket).getKey(sourceKey).getModificationTime().toEpochMilli();
String copySourceIfModifiedSince = headers.getHeaderString(COPY_SOURCE_IF_MODIFIED_SINCE);
String copySourceIfUnmodifiedSince = headers.getHeaderString(COPY_SOURCE_IF_UNMODIFIED_SINCE);
if (!checkCopySourceModificationTime(sourceKeyModificationTime, copySourceIfModifiedSince, copySourceIfUnmodifiedSince)) {
throw S3ErrorTable.newError(PRECOND_FAILED, sourceBucket + "/" + sourceKey);
}
try (OzoneInputStream sourceObject = getBucket(sourceBucket).readKey(sourceKey)) {
String range = headers.getHeaderString(COPY_SOURCE_HEADER_RANGE);
if (range != null) {
RangeHeader rangeHeader = RangeHeaderParserUtil.parseRangeHeader(range, 0);
final long skipped = sourceObject.skip(rangeHeader.getStartOffset());
if (skipped != rangeHeader.getStartOffset()) {
throw new EOFException("Bytes to skip: " + rangeHeader.getStartOffset() + " actual: " + skipped);
}
IOUtils.copyLarge(sourceObject, ozoneOutputStream, 0, rangeHeader.getEndOffset() - rangeHeader.getStartOffset() + 1);
} else {
IOUtils.copy(sourceObject, ozoneOutputStream);
}
}
} else {
IOUtils.copy(body, ozoneOutputStream);
}
} finally {
if (ozoneOutputStream != null) {
ozoneOutputStream.close();
}
}
assert ozoneOutputStream != null;
OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
String eTag = omMultipartCommitUploadPartInfo.getPartName();
if (copyHeader != null) {
return Response.ok(new CopyPartResult(eTag)).build();
} else {
return Response.ok().header("ETag", eTag).build();
}
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
throw S3ErrorTable.newError(NO_SUCH_UPLOAD, uploadID);
} else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key);
}
throw ex;
}
}
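// Illustration only (not part of the original class): each part is a PUT to
// /{bucket}/{key}?partNumber=N&uploadId=<id>; adding an x-amz-copy-source header routes it
// into the copy branch handled above instead. The returned ETag is what the later
// CompleteMultipartUpload request must echo back for that part number. Host, bucket, key
// and uploadId are placeholders.
private static String uploadPartExample(byte[] partBytes, String uploadId) throws java.io.IOException {
    java.net.URL url = new java.net.URL(
        "http://localhost:9878/bucket1/big.bin?partNumber=1&uploadId=" + uploadId);
    java.net.HttpURLConnection conn = (java.net.HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setDoOutput(true);
    try (java.io.OutputStream out = conn.getOutputStream()) {
        out.write(partBytes);                       // raw part payload
    }
    conn.getResponseCode();                         // 200 on success
    return conn.getHeaderField("ETag");             // part ETag for CompleteMultipartUpload
}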
/**
* Returns response for the listParts request.
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html
* @param bucket
* @param key
* @param uploadID
* @param partNumberMarker
* @param maxParts
* @return
* @throws IOException
* @throws OS3Exception
*/
private Response listParts(String bucket, String key, String uploadID, int partNumberMarker, int maxParts) throws IOException, OS3Exception {
ListPartsResponse listPartsResponse = new ListPartsResponse();
try {
OzoneBucket ozoneBucket = getBucket(bucket);
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = ozoneBucket.listParts(key, uploadID, partNumberMarker, maxParts);
listPartsResponse.setBucket(bucket);
listPartsResponse.setKey(key);
listPartsResponse.setUploadID(uploadID);
listPartsResponse.setMaxParts(maxParts);
listPartsResponse.setPartNumberMarker(partNumberMarker);
listPartsResponse.setTruncated(false);
listPartsResponse.setStorageClass(S3StorageType.fromReplicationType(ozoneMultipartUploadPartListParts.getReplicationType(), ozoneMultipartUploadPartListParts.getReplicationFactor()).toString());
if (ozoneMultipartUploadPartListParts.isTruncated()) {
listPartsResponse.setTruncated(ozoneMultipartUploadPartListParts.isTruncated());
listPartsResponse.setNextPartNumberMarker(ozoneMultipartUploadPartListParts.getNextPartNumberMarker());
}
ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> {
ListPartsResponse.Part part = new ListPartsResponse.Part();
part.setPartNumber(partInfo.getPartNumber());
part.setETag(partInfo.getPartName());
part.setSize(partInfo.getSize());
part.setLastModified(Instant.ofEpochMilli(partInfo.getModificationTime()));
listPartsResponse.addPart(part);
});
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
throw S3ErrorTable.newError(NO_SUCH_UPLOAD, uploadID);
} else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key + "/" + uploadID);
}
throw ex;
}
return Response.status(Status.OK).entity(listPartsResponse).build();
}
@VisibleForTesting
public void setHeaders(HttpHeaders headers) {
this.headers = headers;
}
private CopyObjectResponse copyObject(String copyHeader, String destBucket, String destkey, ReplicationType replicationType, ReplicationFactor replicationFactor, boolean storageTypeDefault) throws OS3Exception, IOException {
Pair<String, String> result = parseSourceHeader(copyHeader);
String sourceBucket = result.getLeft();
String sourceKey = result.getRight();
OzoneInputStream sourceInputStream = null;
OzoneOutputStream destOutputStream = null;
boolean closed = false;
try {
// Check whether we are trying to copy the object onto itself.
if (sourceBucket.equals(destBucket) && sourceKey.equals(destkey)) {
// When source and destination are the same and a storage type is
// explicitly provided, we should not throw an exception: the aws cli
// sends such requests when only options like the storage type are
// being changed.
if (storageTypeDefault) {
OS3Exception ex = S3ErrorTable.newError(S3ErrorTable.INVALID_REQUEST, copyHeader);
ex.setErrorMessage("This copy request is illegal because it is " + "trying to copy an object to it self itself without changing " + "the object's metadata, storage clreplaced, website redirect " + "location or encryption attributes.");
throw ex;
} else {
// TODO: Here we should actually change the storage type; since Ozone
// does not support that yet, just return a dummy response
// for now.
CopyObjectResponse copyObjectResponse = new CopyObjectResponse();
copyObjectResponse.setETag(OzoneUtils.getRequestID());
copyObjectResponse.setLastModified(Instant.ofEpochMilli(Time.now()));
return copyObjectResponse;
}
}
OzoneBucket sourceOzoneBucket = getBucket(sourceBucket);
OzoneBucket destOzoneBucket = getBucket(destBucket);
OzoneKeyDetails sourceKeyDetails = sourceOzoneBucket.getKey(sourceKey);
long sourceKeyLen = sourceKeyDetails.getDataSize();
sourceInputStream = sourceOzoneBucket.readKey(sourceKey);
destOutputStream = destOzoneBucket.createKey(destkey, sourceKeyLen, replicationType, replicationFactor, new HashMap<>());
IOUtils.copy(sourceInputStream, destOutputStream);
// Closing here, as if we don't call close this key will not commit in
// OM, and getKey fails.
sourceInputStream.close();
destOutputStream.close();
closed = true;
OzoneKeyDetails destKeyDetails = destOzoneBucket.getKey(destkey);
CopyObjectResponse copyObjectResponse = new CopyObjectResponse();
copyObjectResponse.setETag(OzoneUtils.getRequestID());
copyObjectResponse.setLastModified(destKeyDetails.getModificationTime());
return copyObjectResponse;
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, sourceKey);
} else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, sourceBucket);
} else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, destBucket + "/" + destkey);
}
throw ex;
} finally {
if (!closed) {
if (sourceInputStream != null) {
sourceInputStream.close();
}
if (destOutputStream != null) {
destOutputStream.close();
}
}
}
}
/**
* Parse the key and bucket name from copy header.
*/
@VisibleForTesting
public static Pair<String, String> parseSourceHeader(String copyHeader) throws OS3Exception {
String header = copyHeader;
if (header.startsWith("/")) {
header = copyHeader.substring(1);
}
int pos = header.indexOf('/');
if (pos == -1) {
OS3Exception ex = S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, header);
ex.setErrorMessage("Copy Source must mention the source bucket and " + "key: sourcebucket/sourcekey");
throw ex;
}
return Pair.of(header.substring(0, pos), header.substring(pos + 1));
}
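// Illustration only (not part of the original class): parseSourceHeader splits the
// x-amz-copy-source value into bucket and key, tolerating an optional leading slash.
// For example:
//   Pair<String, String> src = parseSourceHeader("/source-bucket/dir/key.txt");
//   src.getLeft()  -> "source-bucket"
//   src.getRight() -> "dir/key.txt"
// A value without any '/' separator (e.g. "only-a-bucket") raises an InvalidArgument error.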
private static S3StorageType toS3StorageType(String storageType) throws OS3Exception {
try {
return S3StorageType.valueOf(storageType);
} catch (IllegalArgumentException ex) {
throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, storageType);
}
}
private static int parsePartNumberMarker(String partNumberMarker) {
int partMarker = 0;
if (partNumberMarker != null) {
partMarker = Integer.parseInt(partNumberMarker);
}
return partMarker;
}
private static long parseOzoneDate(String ozoneDateStr) throws OS3Exception {
long ozoneDateInMs;
try {
ozoneDateInMs = OzoneUtils.formatDate(ozoneDateStr);
} catch (ParseException e) {
throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, ozoneDateStr);
}
return ozoneDateInMs;
}
private boolean checkCopySourceModificationTime(Long lastModificationTime, String copySourceIfModifiedSinceStr, String copySourceIfUnmodifiedSinceStr) throws OS3Exception {
long copySourceIfModifiedSince = Long.MIN_VALUE;
long copySourceIfUnmodifiedSince = Long.MAX_VALUE;
if (copySourceIfModifiedSinceStr != null) {
copySourceIfModifiedSince = parseOzoneDate(copySourceIfModifiedSinceStr);
}
if (copySourceIfUnmodifiedSinceStr != null) {
copySourceIfUnmodifiedSince = parseOzoneDate(copySourceIfUnmodifiedSinceStr);
}
return (copySourceIfModifiedSince <= lastModificationTime) && (lastModificationTime <= copySourceIfUnmodifiedSince);
}
}
19
Source : TestReconUtils.java
with Apache License 2.0
from apache
@Test
public void testGetReconDbDir() throws Exception {
String filePath = folder.getRoot().getAbsolutePath();
OzoneConfiguration configuration = new OzoneConfiguration();
configuration.set("TEST_DB_DIR", filePath);
File file = new ReconUtils().getReconDbDir(configuration, "TEST_DB_DIR");
Assert.assertEquals(filePath, file.getAbsolutePath());
}
19
Source : TestReconTaskControllerImpl.java
with Apache License 2.0
from apache
@Before
public void setUp() {
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
reconTaskStatusDao = getDao(ReconTaskStatusDao.class);
reconTaskController = new ReconTaskControllerImpl(ozoneConfiguration, reconTaskStatusDao, new HashSet<>());
reconTaskController.start();
}
19
Source : TestOMDBUpdatesHandler.java
with Apache License 2.0
from apache
@Test
public void testPut() throws Exception {
OzoneConfiguration configuration = createNewTestPath();
OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration);
// Create 1 volume, 2 keys and write to source OM DB.
String volumeKey = metaMgr.getVolumeKey("sampleVol");
OmVolumeArgs args = OmVolumeArgs.newBuilder().setVolume("sampleVol").setAdminName("bilbo").setOwnerName("bilbo").build();
metaMgr.getVolumeTable().put(volumeKey, args);
OmKeyInfo firstKey = getOmKeyInfo("sampleVol", "bucketOne", "key_one");
metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_one", firstKey);
OmKeyInfo secondKey = getOmKeyInfo("sampleVol", "bucketOne", "key_two");
metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_two", secondKey);
// Write the secondKey to the target OM DB.
OzoneConfiguration conf2 = createNewTestPath();
OmMetadataManagerImpl reconOmmetaMgr = new OmMetadataManagerImpl(conf2);
reconOmmetaMgr.getKeyTable().put("/sampleVol/bucketOne/key_two", secondKey);
RDBStore rdbStore = (RDBStore) metaMgr.getStore();
RocksDB rocksDB = rdbStore.getDb();
// Get all updates from source DB. (3 PUTs)
TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0);
List<byte[]> writeBatches = new ArrayList<>();
while (transactionLogIterator.isValid()) {
TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch();
result.writeBatch().markWalTerminationPoint();
WriteBatch writeBatch = result.writeBatch();
writeBatches.add(writeBatch.data());
transactionLogIterator.next();
}
// OMDBUpdatesHandler has access to target DB. Hence it has only the
// "secondKey".
OMDBUpdatesHandler omdbUpdatesHandler = new OMDBUpdatesHandler(reconOmmetaMgr);
for (byte[] data : writeBatches) {
WriteBatch writeBatch = new WriteBatch(data);
// Capture the 3 PUT events from source DB.
writeBatch.iterate(omdbUpdatesHandler);
}
List<OMDBUpdateEvent> events = omdbUpdatesHandler.getEvents();
assertEquals(3, events.size());
OMDBUpdateEvent volEvent = events.get(0);
assertEquals(PUT, volEvent.getAction());
assertEquals(volumeKey, volEvent.getKey());
assertEquals(args.getVolume(), ((OmVolumeArgs) volEvent.getValue()).getVolume());
OMDBUpdateEvent keyEvent = events.get(1);
assertEquals(PUT, keyEvent.getAction());
assertEquals("/sampleVol/bucketOne/key_one", keyEvent.getKey());
assertNull(keyEvent.getOldValue());
OMDBUpdateEvent updateEvent = events.get(2);
assertEquals(UPDATE, updateEvent.getAction());
assertEquals("/sampleVol/bucketOne/key_two", updateEvent.getKey());
assertNotNull(updateEvent.getOldValue());
assertEquals(secondKey.getKeyName(), ((OmKeyInfo) updateEvent.getOldValue()).getKeyName());
}
19
Source : TestOMDBUpdatesHandler.java
with Apache License 2.0
from apache
private OzoneConfiguration createNewTestPath() throws IOException {
OzoneConfiguration configuration = new OzoneConfiguration();
File newFolder = folder.newFolder();
if (!newFolder.exists()) {
assertTrue(newFolder.mkdirs());
}
ServerUtils.setOzoneMetaDirPath(configuration, newFolder.toString());
return configuration;
}
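A variant of the helper above, as a minimal sketch: it assumes only that ServerUtils.setOzoneMetaDirPath records the given path as the OM metadata directory in the configuration, and swaps the JUnit TemporaryFolder rule for a plain JDK temp directory (the directory prefix is a placeholder).
private OzoneConfiguration newIsolatedOmConfig() throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Hypothetical: create a fresh temp directory so each test gets its own OM DB.
    java.nio.file.Path metaDir = java.nio.file.Files.createTempDirectory("om-meta");
    ServerUtils.setOzoneMetaDirPath(conf, metaDir.toString());
    return conf;
}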