Here are examples of the Java API class org.apache.hadoop.conf.Configuration, taken from open source projects.
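Before the project examples, here is a minimal sketch of the core Configuration API that all of them build on: string setters, typed setters, and typed getters with defaults. The property names (example.*) are illustrative assumptions, not keys used by any project below.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationBasics {
    public static void main(String[] args) {
        // Passing false skips loading the core-default.xml/core-site.xml resources,
        // which many of the tests below do to keep the configuration isolated.
        Configuration conf = new Configuration(false);
        conf.set("example.name", "demo");        // plain string property
        conf.setInt("example.port", 8020);       // numeric values are stored as strings
        conf.setBoolean("example.enabled", true);
        String name = conf.get("example.name");                      // "demo"
        int port = conf.getInt("example.port", 0);                    // 8020
        boolean enabled = conf.getBoolean("example.enabled", false);  // true
        long absent = conf.getLong("example.absent", 42L);            // key missing, so default: 42
        System.out.println(name + " " + port + " " + enabled + " " + absent);
    }
}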
1. JoinSelectStatisticsTest#getConfig()
Project: incubator-rya
File: JoinSelectStatisticsTest.java
private static Configuration getConfig() { Configuration conf = new Configuration(); conf.set("fs.default.name", "file:///"); conf.set("mapreduce.framework.name", "local"); conf.set("spo.table", "rya_spo"); conf.set("prospects.table", "rya_prospects"); conf.set("selectivity.table", "rya_selectivity"); conf.set("auths", ""); conf.set("instance", INSTANCE_NAME); conf.set("username", "root"); conf.set("password", ""); conf.set("inputpath", "temp"); conf.set("outputpath", "temp"); conf.set("prospects.outputpath", "prospects"); conf.set("spo.outputpath", "spo"); return conf; }
2. AbstractSqoopSentryTestBase#setupConf()
Project: incubator-sentry
File: AbstractSqoopSentryTestBase.java
public static void setupConf() throws Exception { baseDir = createTempDir(); sqoopDir = new File(baseDir, "sqoop"); dbDir = new File(baseDir, "sentry_policy_db"); policyFilePath = new File(baseDir, "local_policy_file.ini"); policyFile = new PolicyFile(); /** set the configuration for Sentry Service */ Configuration conf = new Configuration(); conf.set(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE); conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); conf.set(ServerConfig.ADMIN_GROUPS, Joiner.on(",").join(ADMIN_GROUP, UserGroupInformation.getLoginUser().getPrimaryGroupName())); conf.set(ServerConfig.RPC_ADDRESS, SERVER_HOST); conf.set(ServerConfig.RPC_PORT, String.valueOf(NetworkUtils.findAvailablePort())); conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dbDir.getPath() + ";create=true"); conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFilePath.getPath()); server = new SentryServiceFactory().create(conf); }
3. AbstractSqoopSentryTestBase#setupConf()
Project: sentry
File: AbstractSqoopSentryTestBase.java
public static void setupConf() throws Exception { baseDir = createTempDir(); sqoopDir = new File(baseDir, "sqoop"); dbDir = new File(baseDir, "sentry_policy_db"); policyFilePath = new File(baseDir, "local_policy_file.ini"); policyFile = new PolicyFile(); /** set the configuration for Sentry Service */ Configuration conf = new Configuration(); conf.set(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE); conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); conf.set(ServerConfig.ADMIN_GROUPS, Joiner.on(",").join(ADMIN_GROUP, UserGroupInformation.getLoginUser().getPrimaryGroupName())); conf.set(ServerConfig.RPC_ADDRESS, SERVER_HOST); conf.set(ServerConfig.RPC_PORT, String.valueOf(NetworkUtils.findAvailablePort())); conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dbDir.getPath() + ";create=true"); conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy"); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFilePath.getPath()); server = new SentryServiceFactory().create(conf); }
4. AbstractKafkaSentryTestBase#getClientConfig()
Project: sentry
File: AbstractKafkaSentryTestBase.java
private static Configuration getClientConfig() { Configuration conf = new Configuration(); /** set the Sentry client configuration for Kafka Service integration */ conf.set(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE); conf.set(ClientConfig.SERVER_RPC_ADDRESS, sentryServer.getAddress().getHostName()); conf.setInt(ClientConfig.SERVER_RPC_PORT, sentryServer.getAddress().getPort()); conf.set(KafkaAuthConf.AuthzConfVars.AUTHZ_PROVIDER.getVar(), LocalGroupResourceAuthorizationProvider.class.getName()); conf.set(KafkaAuthConf.AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar(), SentryGenericProviderBackend.class.getName()); conf.set(KafkaAuthConf.AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), policyFilePath.getPath()); conf.setBoolean(ClientConfig.ENABLE_CACHING, true); conf.setLong(ClientConfig.CACHE_TTL_MS, CACHE_TTL_MS); conf.set(ClientConfig.PRIVILEGE_CONVERTER, KafkaTSentryPrivilegeConverter.class.getName()); return conf; }
5. RowKeyDistributorTestBase#beforeClass()
Project: cdap
File: RowKeyDistributorTestBase.java
@BeforeClass public static void beforeClass() throws Exception { if (!runBefore) { return; } testingUtility = new HBaseTestingUtility(); Configuration hConf = testingUtility.getConfiguration(); hConf.set("yarn.is.minicluster", "true"); // Tune down the connection thread pool size hConf.setInt("hbase.hconnection.threads.core", 5); hConf.setInt("hbase.hconnection.threads.max", 10); // Tune down handler threads in regionserver hConf.setInt("hbase.regionserver.handler.count", 10); // Set to random port hConf.setInt("hbase.master.port", Networks.getRandomPort()); hConf.setInt("hbase.master.info.port", Networks.getRandomPort()); hConf.setInt("hbase.regionserver.port", Networks.getRandomPort()); hConf.setInt("hbase.regionserver.info.port", Networks.getRandomPort()); testingUtility.startMiniCluster(); hTable = testingUtility.createTable(TABLE, CF); }
6. AbstractMRNewApiSaveTest#testUpsertParamJsonScript()
Project: elasticsearch-hadoop
File: AbstractMRNewApiSaveTest.java
@Test public void testUpsertParamJsonScript() throws Exception { Configuration conf = createConf(); conf.set(ConfigurationOptions.ES_RESOURCE, "mrnewapi/upsert-script-param"); conf.set(ConfigurationOptions.ES_INDEX_AUTO_CREATE, "yes"); conf.set(ConfigurationOptions.ES_WRITE_OPERATION, "upsert"); conf.set(ConfigurationOptions.ES_MAPPING_ID, "number"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT, "counter += param1; anothercounter += param2"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT_LANG, "groovy"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT_PARAMS_JSON, "{ \"param1\":1, \"param2\":2}"); runJob(conf); }
7. AbstractMRNewApiSaveTest#testUpsertParamScript()
Project: elasticsearch-hadoop
File: AbstractMRNewApiSaveTest.java
@Test public void testUpsertParamScript() throws Exception { Configuration conf = createConf(); conf.set(ConfigurationOptions.ES_RESOURCE, "mrnewapi/upsert-script-param"); conf.set(ConfigurationOptions.ES_INDEX_AUTO_CREATE, "yes"); conf.set(ConfigurationOptions.ES_WRITE_OPERATION, "upsert"); conf.set(ConfigurationOptions.ES_MAPPING_ID, "number"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT, "counter += param1; anothercounter += param2"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT_LANG, "groovy"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT_PARAMS, " param1:<1>, param2:number "); runJob(conf); }
8. AbstractMRNewApiSaveTest#testUpdateOnlyParamJsonScript()
Project: elasticsearch-hadoop
File: AbstractMRNewApiSaveTest.java
@Test public void testUpdateOnlyParamJsonScript() throws Exception { Configuration conf = createConf(); conf.set(ConfigurationOptions.ES_RESOURCE, "mrnewapi/createwithid"); conf.set(ConfigurationOptions.ES_INDEX_AUTO_CREATE, "yes"); conf.set(ConfigurationOptions.ES_WRITE_OPERATION, "update"); conf.set(ConfigurationOptions.ES_MAPPING_ID, "number"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT, "counter = param1; anothercounter = param2"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT_LANG, "groovy"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT_PARAMS_JSON, "{ \"param1\":1, \"param2\":2}"); runJob(conf); }
9. AbstractMRNewApiSaveTest#testUpdateOnlyParamScript()
Project: elasticsearch-hadoop
File: AbstractMRNewApiSaveTest.java
@Test public void testUpdateOnlyParamScript() throws Exception { Configuration conf = createConf(); conf.set(ConfigurationOptions.ES_RESOURCE, "mrnewapi/createwithid"); conf.set(ConfigurationOptions.ES_INDEX_AUTO_CREATE, "yes"); conf.set(ConfigurationOptions.ES_WRITE_OPERATION, "update"); conf.set(ConfigurationOptions.ES_MAPPING_ID, "number"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT, "counter = param1; anothercounter = param2"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT_LANG, "groovy"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT_PARAMS, " param1:<1>, param2:number "); runJob(conf); }
10. AbstractMRNewApiSaveTest#testUpdateOnlyScript()
Project: elasticsearch-hadoop
File: AbstractMRNewApiSaveTest.java
@Test public void testUpdateOnlyScript() throws Exception { Configuration conf = createConf(); // use an existing id to allow the update to succeed conf.set(ConfigurationOptions.ES_RESOURCE, "mrnewapi/createwithid"); conf.set(ConfigurationOptions.ES_WRITE_OPERATION, "update"); conf.set(ConfigurationOptions.ES_MAPPING_ID, "number"); conf.set(ConfigurationOptions.ES_INDEX_AUTO_CREATE, "yes"); conf.set(ConfigurationOptions.ES_UPDATE_RETRY_ON_CONFLICT, "3"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT, "counter = 3"); conf.set(ConfigurationOptions.ES_UPDATE_SCRIPT_LANG, "groovy"); runJob(conf); }
11. TestFaultTolerance#testBasicInputFailureWithoutExit()
Project: tez
File: TestFaultTolerance.java
@Test(timeout = 60000) public void testBasicInputFailureWithoutExit() throws Exception { Configuration testConf = new Configuration(false); testConf.setBoolean(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_DO_FAIL, "v2"), true); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_TASK_INDEX, "v2"), "1"); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_TASK_ATTEMPT, "v2"), "0"); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_INPUT_INDEX, "v2"), "0"); testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_TASK_INDEX, "v2"), "0,1"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v2", 1), 4); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v2", 0), 3); DAG dag = SimpleTestDAG.createDAG("testBasicInputFailureWithoutExit", testConf); runDAGAndVerify(dag, DAGStatus.State.SUCCEEDED); }
12. TestDagEngine#testJobDefinition()
Project: oozie
File: TestDagEngine.java
public void testJobDefinition() throws Exception { Reader reader = IOUtils.getResourceAsReader("wf-ext-schema-valid.xml", -1); Writer writer = new FileWriter(getTestCaseDir() + "/workflow.xml"); IOUtils.copyCharStream(reader, writer); final DagEngine engine = new DagEngine(getTestUser(), "a"); Configuration conf = new XConfiguration(); conf.set(OozieClient.APP_PATH, getTestCaseDir() + File.separator + "workflow.xml"); conf.set(OozieClient.USER_NAME, getTestUser()); conf.set(OozieClient.GROUP_NAME, getTestGroup()); injectKerberosInfo(conf); conf.set(OozieClient.LOG_TOKEN, "t"); conf.set("signal-value", "OK"); conf.set("external-status", "ok"); conf.set("error", "end.error"); String jobId1 = engine.submitJob(conf, false); String def = engine.getDefinition(jobId1); assertNotNull(def); }
13. AbstractSqoopSentryTestBase#getClientConfig()
Project: sentry
File: AbstractSqoopSentryTestBase.java
private static Configuration getClientConfig() { Configuration conf = new Configuration(); /** set the Sentry client configuration for Sqoop Service integration */ conf.set(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE); conf.set(ClientConfig.SERVER_RPC_ADDRESS, server.getAddress().getHostName()); conf.set(ClientConfig.SERVER_RPC_PORT, String.valueOf(server.getAddress().getPort())); conf.set(AuthzConfVars.AUTHZ_PROVIDER.getVar(), LocalGroupResourceAuthorizationProvider.class.getName()); conf.set(AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar(), SentryGenericProviderBackend.class.getName()); conf.set(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), policyFilePath.getPath()); conf.set(AuthzConfVars.AUTHZ_TESTING_MODE.getVar(), "true"); return conf; }
14. TestFaultTolerance#testBasicInputFailureWithoutExit()
Project: incubator-tez
File: TestFaultTolerance.java
@Test(timeout = 60000) public void testBasicInputFailureWithoutExit() throws Exception { Configuration testConf = new Configuration(false); testConf.setBoolean(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_DO_FAIL, "v2"), true); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_TASK_INDEX, "v2"), "1"); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_TASK_ATTEMPT, "v2"), "0"); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_INPUT_INDEX, "v2"), "0"); testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_TASK_INDEX, "v2"), "0,1"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v2", 1), 4); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v2", 0), 3); DAG dag = SimpleTestDAG.createDAG("testBasicInputFailureWithoutExit", testConf); runDAGAndVerify(dag, DAGStatus.State.SUCCEEDED); }
15. AbstractSqoopSentryTestBase#getClientConfig()
Project: incubator-sentry
File: AbstractSqoopSentryTestBase.java
private static Configuration getClientConfig() { Configuration conf = new Configuration(); /** set the Sentry client configuration for Sqoop Service integration */ conf.set(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE); conf.set(ClientConfig.SERVER_RPC_ADDRESS, server.getAddress().getHostName()); conf.set(ClientConfig.SERVER_RPC_PORT, String.valueOf(server.getAddress().getPort())); conf.set(AuthzConfVars.AUTHZ_PROVIDER.getVar(), LocalGroupResourceAuthorizationProvider.class.getName()); conf.set(AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar(), SentryGenericProviderBackend.class.getName()); conf.set(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), policyFilePath.getPath()); conf.set(AuthzConfVars.AUTHZ_TESTING_MODE.getVar(), "true"); return conf; }
16. MongoRyaDirectExample#getConf()
Project: incubator-rya
File: MongoRyaDirectExample.java
private static Configuration getConf() { Configuration conf = new Configuration(); conf.set(ConfigUtils.USE_MONGO, "true"); conf.set(MongoDBRdfConfiguration.USE_TEST_MONGO, "true"); conf.set(MongoDBRdfConfiguration.MONGO_DB_NAME, MONGO_DB); conf.set(MongoDBRdfConfiguration.MONGO_COLLECTION_PREFIX, MONGO_COLL_PREFIX); conf.set(ConfigUtils.GEO_PREDICATES_LIST, "http://www.opengis.net/ont/geosparql#asWKT"); conf.set(ConfigUtils.USE_GEO, "true"); conf.set(RdfCloudTripleStoreConfiguration.CONF_TBL_PREFIX, MONGO_COLL_PREFIX); return conf; }
17. TestGetConf#testNonFederation()
Project: hadoop-20
File: TestGetConf.java
/** * Tests to make sure the returned addresses are correct in case of default * configuration with no federation */ @Test public void testNonFederation() throws Exception { Configuration conf = new Configuration(); // Returned namenode address should match default address conf.set("fs.default.name", "hdfs://localhost:1000"); verifyAddresses(conf, TestType.NAMENODE, "127.0.0.1:1000"); // Returned namenode address should match service RPC address conf = new Configuration(); conf.set(NameNode.DATANODE_PROTOCOL_ADDRESS, "localhost:1000"); conf.set(FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001"); verifyAddresses(conf, TestType.NAMENODE, "127.0.0.1:1000"); // Returned address should match RPC address conf = new Configuration(); conf.set(FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001"); verifyAddresses(conf, TestType.NAMENODE, "127.0.0.1:1001"); }
18. CouchBaseBenchmarkTest#testCouchBaseAppInput()
Project: apex-malhar
File: CouchBaseBenchmarkTest.java
@Test public void testCouchBaseAppInput() throws FileNotFoundException, IOException { Configuration conf = new Configuration(); InputStream is = new FileInputStream("src/site/conf/dt-site-couchbase.xml"); conf.addResource(is); conf.get("dt.application.CouchBaseAppInput.operator.couchbaseInput.store.uriString"); conf.get("dt.application.CouchBaseAppInput.operator.couchbaseInput.store.blocktime"); conf.get("dt.application.CouchBaseAppInput.operator.couchbaseInput.store.timeout"); conf.get("dt.application.CouchBaseAppInput.operator.couchbaseInput.store.bucket"); conf.get("dt.application.CouchBaseAppInput.operator.couchbaseInput.store.password"); LocalMode lm = LocalMode.newInstance(); try { lm.prepareDAG(new CouchBaseAppInput(), conf); LocalMode.Controller lc = lm.getController(); lc.run(20000); } catch (Exception ex) { logger.info(ex.getCause()); } is.close(); }
19. UtilsTest#testGetNonNegativeLong()
Project: dr-elephant
File: UtilsTest.java
@Test public void testGetNonNegativeLong() { Configuration conf = new Configuration(); conf.set("foo1", "100"); conf.set("foo2", "-100"); conf.set("foo3", "0"); conf.set("foo4", "0.5"); conf.set("foo5", "9999999999999999"); conf.set("foo6", "bar"); long defaultValue = 50; assertEquals(100, Utils.getNonNegativeLong(conf, "foo1", defaultValue)); assertEquals(0, Utils.getNonNegativeLong(conf, "foo2", defaultValue)); assertEquals(0, Utils.getNonNegativeLong(conf, "foo3", defaultValue)); assertEquals(defaultValue, Utils.getNonNegativeLong(conf, "foo4", defaultValue)); assertEquals(9999999999999999L, Utils.getNonNegativeLong(conf, "foo5", defaultValue)); assertEquals(defaultValue, Utils.getNonNegativeLong(conf, "foo6", defaultValue)); assertEquals(defaultValue, Utils.getNonNegativeLong(conf, "foo7", defaultValue)); }
20. UtilsTest#testGetNonNegativeInt()
Project: dr-elephant
File: UtilsTest.java
@Test public void testGetNonNegativeInt() { Configuration conf = new Configuration(); conf.set("foo1", "100"); conf.set("foo2", "-100"); conf.set("foo3", "0"); conf.set("foo4", "0.5"); conf.set("foo5", "9999999999999999"); conf.set("foo6", "bar"); int defaultValue = 50; assertEquals(100, Utils.getNonNegativeInt(conf, "foo1", defaultValue)); assertEquals(0, Utils.getNonNegativeInt(conf, "foo2", defaultValue)); assertEquals(0, Utils.getNonNegativeInt(conf, "foo3", defaultValue)); assertEquals(defaultValue, Utils.getNonNegativeInt(conf, "foo4", defaultValue)); assertEquals(defaultValue, Utils.getNonNegativeInt(conf, "foo5", defaultValue)); assertEquals(defaultValue, Utils.getNonNegativeInt(conf, "foo6", defaultValue)); assertEquals(defaultValue, Utils.getNonNegativeInt(conf, "foo7", defaultValue)); }
21. XDataTestCase#getCoordConf()
Project: oozie
File: XDataTestCase.java
/** * Get coordinator configuration * * @param appPath application path * @return coordinator configuration * @throws IOException thrown if unable to get coord conf */ protected Configuration getCoordConf(Path appPath) throws IOException { Path wfAppPath = new Path(getFsTestCaseDir(), "coord"); Configuration jobConf = new XConfiguration(); jobConf.set(OozieClient.COORDINATOR_APP_PATH, appPath.toString()); jobConf.set(OozieClient.USER_NAME, getTestUser()); jobConf.set(OozieClient.GROUP_NAME, getTestGroup()); jobConf.set("jobTracker", getJobTrackerUri()); jobConf.set("nameNode", getNameNodeUri()); jobConf.set("wfAppPath", wfAppPath.toString()); injectKerberosInfo(jobConf); String content = "<workflow-app xmlns='uri:oozie:workflow:0.1' xmlns:sla='uri:oozie:sla:0.1' name='no-op-wf'>"; content += "<start to='end' />"; content += "<end name='end' /></workflow-app>"; writeToFile(content, wfAppPath, "workflow.xml"); return jobConf; }
22. TestUnorderedPartitionedKVWriter#createConfiguration()
Project: tez
File: TestUnorderedPartitionedKVWriter.java
private Configuration createConfiguration(OutputContext outputContext, Class<? extends Writable> keyClass, Class<? extends Writable> valClass, boolean shouldCompress, int maxSingleBufferSizeBytes, Class<? extends Partitioner> partitionerClass) { Configuration conf = new Configuration(false); conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, outputContext.getWorkDirs()); conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_CLASS, keyClass.getName()); conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_VALUE_CLASS, valClass.getName()); conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_PARTITIONER_CLASS, partitionerClass.getName()); if (maxSingleBufferSizeBytes >= 0) { conf.setInt(TezRuntimeConfiguration.TEZ_RUNTIME_UNORDERED_OUTPUT_MAX_PER_BUFFER_SIZE_BYTES, maxSingleBufferSizeBytes); } conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_COMPRESS, shouldCompress); if (shouldCompress) { conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_COMPRESS_CODEC, DefaultCodec.class.getName()); } conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_REPORT_PARTITION_STATS, reportPartitionStats.getType()); return conf; }
23. TestPipelinedSorter#getConf()
Project: tez
File: TestPipelinedSorter.java
public static Configuration getConf() { Configuration conf = new Configuration(); conf.set("fs.defaultFS", "file:///"); //To enable PipelinedSorter conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_SORTER_CLASS, SorterImpl.PIPELINED.name()); conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_CLASS, Text.class.getName()); conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_VALUE_CLASS, Text.class.getName()); conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_PARTITIONER_CLASS, HashPartitioner.class.getName()); conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT, true); //Setup localdirs if (workDir != null) { String localDirs = workDir.toString(); conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, localDirs); } return conf; }
24. JavaActionExecutor#createBaseHadoopConf()
Project: oozie
File: JavaActionExecutor.java
public Configuration createBaseHadoopConf(Context context, Element actionXml) { Configuration conf = new XConfiguration(); conf.set(HADOOP_USER, context.getProtoActionConf().get(WorkflowAppService.HADOOP_USER)); conf.set(HADOOP_UGI, context.getProtoActionConf().get(WorkflowAppService.HADOOP_UGI)); if (context.getProtoActionConf().get(WorkflowAppService.HADOOP_JT_KERBEROS_NAME) != null) { conf.set(WorkflowAppService.HADOOP_JT_KERBEROS_NAME, context.getProtoActionConf().get(WorkflowAppService.HADOOP_JT_KERBEROS_NAME)); } if (context.getProtoActionConf().get(WorkflowAppService.HADOOP_NN_KERBEROS_NAME) != null) { conf.set(WorkflowAppService.HADOOP_NN_KERBEROS_NAME, context.getProtoActionConf().get(WorkflowAppService.HADOOP_NN_KERBEROS_NAME)); } conf.set(OozieClient.GROUP_NAME, context.getProtoActionConf().get(OozieClient.GROUP_NAME)); Namespace ns = actionXml.getNamespace(); String jobTracker = actionXml.getChild("job-tracker", ns).getTextTrim(); String nameNode = actionXml.getChild("name-node", ns).getTextTrim(); conf.set(HADOOP_JOB_TRACKER, jobTracker); conf.set(HADOOP_NAME_NODE, nameNode); conf.set("mapreduce.fileoutputcommitter.marksuccessfuljobs", "true"); return conf; }
25. TSOForHBaseCompactorTestModule#provideHBaseConfig()
Project: omid
File: TSOForHBaseCompactorTestModule.java
@Provides @Singleton Configuration provideHBaseConfig() throws IOException { Configuration hbaseConf = HBaseConfiguration.create(); hbaseConf.setInt("hbase.hregion.memstore.flush.size", 10_000 * 1024); hbaseConf.setInt("hbase.regionserver.nbreservationblocks", 1); hbaseConf.set("tso.host", "localhost"); hbaseConf.setInt("tso.port", 1234); hbaseConf.set("hbase.coprocessor.region.classes", "org.apache.omid.transaction.OmidCompactor"); final String rootdir = "/tmp/hbase.test.dir/"; File rootdirFile = new File(rootdir); FileUtils.deleteDirectory(rootdirFile); hbaseConf.set("hbase.rootdir", rootdir); return hbaseConf; }
26. TestFilePersistentFormatter#testTextFileWithZipFormatter()
Project: lens
File: TestFilePersistentFormatter.java
/** * Test text file with zip formatter. * * @throws IOException Signals that an I/O exception has occurred. */ @Test public void testTextFileWithZipFormatter() throws IOException { Configuration conf = new Configuration(); setConf(conf); conf.set("test.partfile.dir", partFileTextDir.toString()); conf.set(LensConfConstants.QUERY_OUTPUT_FILE_EXTN, ".txt"); conf.set(LensConfConstants.QUERY_OUTPUT_HEADER, ""); conf.set(LensConfConstants.QUERY_OUTPUT_SERDE, LazySimpleSerDe.class.getCanonicalName()); conf.setBoolean(LensConfConstants.RESULT_SPLIT_INTO_MULTIPLE, true); conf.setLong(LensConfConstants.RESULT_SPLIT_MULTIPLE_MAX_ROWS, 2L); testFormatter(conf, "UTF8", LensConfConstants.RESULT_SET_PARENT_DIR_DEFAULT, ".zip", getMockedResultSetWithoutComma()); // validate rows List<String> actual = readZipOutputFile(new Path(formatter.getFinalOutputPath()), conf, "UTF-8"); System.out.println("Actual rows:" + actual); Assert.assertEquals(actual, getExpectedTextRowsWithMultipleWithoutComma()); }
27. TestSentryStoreImportExport#setupEnv()
Project: sentry
File: TestSentryStoreImportExport.java
@BeforeClass public static void setupEnv() throws Exception { dataDir = new File(Files.createTempDir(), "sentry_policy_db"); Configuration conf = new Configuration(false); conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dataDir.getPath() + ";create=true"); conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "sentry"); conf.setStrings(ServerConfig.ADMIN_GROUPS, adminGroups); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); policyFilePath = new File(dataDir, "local_policy_file.ini"); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFilePath.getPath()); policyFile = new PolicyFile(); sentryStore = new SentryStore(conf); String adminUser = "g1"; addGroupsToUser(adminUser, adminGroups); writePolicyFile(); }
28. AbstractKafkaSentryTestBase#getClientConfig()
Project: incubator-sentry
File: AbstractKafkaSentryTestBase.java
private static Configuration getClientConfig() { Configuration conf = new Configuration(); /** set the Sentry client configuration for Kafka Service integration */ conf.set(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE); conf.set(ClientConfig.SERVER_RPC_ADDRESS, sentryServer.getAddress().getHostName()); conf.set(ClientConfig.SERVER_RPC_PORT, String.valueOf(sentryServer.getAddress().getPort())); conf.set(KafkaAuthConf.AuthzConfVars.AUTHZ_PROVIDER.getVar(), LocalGroupResourceAuthorizationProvider.class.getName()); conf.set(KafkaAuthConf.AuthzConfVars.AUTHZ_PROVIDER_BACKEND.getVar(), SentryGenericProviderBackend.class.getName()); conf.set(KafkaAuthConf.AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), policyFilePath.getPath()); return conf; }
29. TestSentryStoreImportExport#setupEnv()
Project: incubator-sentry
File: TestSentryStoreImportExport.java
@BeforeClass public static void setupEnv() throws Exception { dataDir = new File(Files.createTempDir(), "sentry_policy_db"); Configuration conf = new Configuration(false); conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); conf.set(ServerConfig.SENTRY_STORE_JDBC_URL, "jdbc:derby:;databaseName=" + dataDir.getPath() + ";create=true"); conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "sentry"); conf.setStrings(ServerConfig.ADMIN_GROUPS, adminGroups); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); policyFilePath = new File(dataDir, "local_policy_file.ini"); conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFilePath.getPath()); policyFile = new PolicyFile(); sentryStore = new SentryStore(conf); String adminUser = "g1"; addGroupsToUser(adminUser, adminGroups); writePolicyFile(); }
30. ExternalIndexMain#getConf()
Project: incubator-rya
File: ExternalIndexMain.java
private static Configuration getConf() { Configuration conf = new Configuration(); conf.set(ConfigUtils.CLOUDBASE_USER, userStr); conf.set(ConfigUtils.CLOUDBASE_PASSWORD, passStr); conf.set(ConfigUtils.CLOUDBASE_INSTANCE, instStr); conf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, zooStr); conf.set(ConfigUtils.CLOUDBASE_AUTHS, AUTHS); conf.setBoolean(ConfigUtils.DISPLAY_QUERY_PLAN, true); return conf; }
31. TSOForHBaseCompactorTestModule#provideHBaseConfig()
Project: incubator-omid
File: TSOForHBaseCompactorTestModule.java
@Provides @Singleton Configuration provideHBaseConfig() throws IOException { Configuration hbaseConf = HBaseConfiguration.create(); hbaseConf.setInt("hbase.hregion.memstore.flush.size", 10_000 * 1024); hbaseConf.setInt("hbase.regionserver.nbreservationblocks", 1); hbaseConf.set("tso.host", "localhost"); hbaseConf.setInt("tso.port", 1234); hbaseConf.set("hbase.coprocessor.region.classes", "org.apache.omid.transaction.OmidCompactor"); final String rootdir = "/tmp/hbase.test.dir/"; File rootdirFile = new File(rootdir); FileUtils.deleteDirectory(rootdirFile); hbaseConf.set("hbase.rootdir", rootdir); return hbaseConf; }
32. TestScanWhenTTLExpired#before()
Project: hindex
File: TestScanWhenTTLExpired.java
@BeforeClass public static void before() throws Exception { Configuration conf = TESTING_UTIL.getConfiguration(); conf.setInt("hbase.balancer.period", 60000); // Needed because some tests have splits happening on RS that are killed // We don't want to wait 3min for the master to figure it out conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 4000); conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, IndexMasterObserver.class.getName()); conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, IndexRegionObserver.class.getName()); conf.set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, IndexWALObserver.class.getName()); conf.setBoolean("hbase.use.secondary.index", true); TESTING_UTIL.startMiniCluster(NB_SERVERS); }
33. HadoopStartup#configuration()
Project: ignite
File: HadoopStartup.java
/** * @return Configuration for job run. */ @SuppressWarnings("UnnecessaryFullyQualifiedName") public static Configuration configuration() { Configuration cfg = new Configuration(); cfg.set("fs.defaultFS", "igfs://igfs@localhost"); cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName()); cfg.set("fs.AbstractFileSystem.igfs.impl", IgniteHadoopFileSystem.class.getName()); cfg.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER"); cfg.set("mapreduce.framework.name", "ignite"); cfg.set("mapreduce.jobtracker.address", "localhost:11211"); return cfg; }
34. TestFullLogReconstruction#setUpBeforeClass()
Project: hindex
File: TestFullLogReconstruction.java
/** * @throws java.lang.Exception */ @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration c = TEST_UTIL.getConfiguration(); c.setBoolean("dfs.support.append", true); // quicker heartbeat interval for faster DN death notification c.setInt("heartbeat.recheck.interval", 5000); c.setInt("dfs.heartbeat.interval", 1); c.setInt("dfs.socket.timeout", 5000); // faster failover with cluster.shutdown();fs.close() idiom c.setInt("ipc.client.connect.max.retries", 1); c.setInt("dfs.client.block.recovery.retries", 1); TEST_UTIL.startMiniCluster(2); }
35. TestFaultTolerance#testAttemptOfDownstreamVertexConnectedWithTwoUpstreamVerticesFailure()
Project: incubator-tez
File: TestFaultTolerance.java
/** * Downstream (v3) attempt failure of a vertex connected with * two upstream vertices. * v1 v2 * \ / * v3 * * @throws Exception */ @Test(timeout = 60000) public void testAttemptOfDownstreamVertexConnectedWithTwoUpstreamVerticesFailure() throws Exception { Configuration testConf = new Configuration(false); testConf.setBoolean(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_DO_FAIL, "v3"), true); testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_TASK_INDEX, "v3"), "0,1"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_UPTO_TASK_ATTEMPT, "v3"), 1); //v1 input = 2. v2 input = 2 //v3 attempt2 value = 2 + 2 + 3 = 7 testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_TASK_INDEX, "v3"), "0,1"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v3", 0), 7); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v3", 1), 7); DAG dag = SimpleVTestDAG.createDAG("testAttemptOfDownstreamVertexConnectedWithTwoUpstreamVerticesFailure", testConf); runDAGAndVerify(dag, DAGStatus.State.SUCCEEDED); }
36. TestAnalyzer#testBasicInputFailureWithExit()
Project: tez
File: TestAnalyzer.java
private List<StepCheck[]> testBasicInputFailureWithExit() throws Exception { Configuration testConf = new Configuration(false); testConf.setInt(SimpleTestDAG.TEZ_SIMPLE_DAG_NUM_TASKS, 1); testConf.setBoolean(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_DO_FAIL, "v2"), true); testConf.setBoolean(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_DO_FAIL_AND_EXIT, "v2"), true); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_TASK_INDEX, "v2"), "0"); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_TASK_ATTEMPT, "v2"), "0"); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_INPUT_INDEX, "v2"), "0"); StepCheck[] check = { createStep("v1 : 000000_0", CriticalPathDependency.INIT_DEPENDENCY), createStep("v2 : 000000_0", CriticalPathDependency.DATA_DEPENDENCY), createStep("v1 : 000000_1", CriticalPathDependency.OUTPUT_RECREATE_DEPENDENCY), createStep("v2 : 000000_1", CriticalPathDependency.DATA_DEPENDENCY) }; DAG dag = SimpleTestDAG.createDAG("testBasicInputFailureWithExit", testConf); runDAG(dag, DAGStatus.State.SUCCEEDED); return Collections.singletonList(check); }
37. TestFaultTolerance#testAttemptOfDownstreamVertexConnectedWithTwoUpstreamVerticesFailure()
Project: tez
File: TestFaultTolerance.java
/** * Downstream (v3) attempt failure of a vertex connected with * two upstream vertices. * v1 v2 * \ / * v3 * * @throws Exception */ @Test(timeout = 60000) public void testAttemptOfDownstreamVertexConnectedWithTwoUpstreamVerticesFailure() throws Exception { Configuration testConf = new Configuration(false); testConf.setBoolean(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_DO_FAIL, "v3"), true); testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_TASK_INDEX, "v3"), "0,1"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_UPTO_TASK_ATTEMPT, "v3"), 1); //v1 input = 2. v2 input = 2 //v3 attempt2 value = 2 + 2 + 3 = 7 testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_TASK_INDEX, "v3"), "0,1"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v3", 0), 7); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v3", 1), 7); DAG dag = SimpleVTestDAG.createDAG("testAttemptOfDownstreamVertexConnectedWithTwoUpstreamVerticesFailure", testConf); runDAGAndVerify(dag, DAGStatus.State.SUCCEEDED); }
38. HadoopDataSourceUtilTest#loadProfiles_attribute()
Project: asakusafw
File: HadoopDataSourceUtilTest.java
/** * Loads a profile with attributes. */ @Test public void loadProfiles_attribute() { Configuration conf = new Configuration(); conf.set(key("root"), MockHadoopDataSource.class.getName()); conf.set(key("root", "path"), "/"); conf.set(key("root", "hello1"), "world1"); conf.set(key("root", "hello2"), "world2"); conf.set(key("root", "hello3"), "world3"); List<DirectDataSourceProfile> profiles = HadoopDataSourceUtil.loadProfiles(conf); assertThat(profiles.size(), is(1)); DirectDataSourceProfile profile = find(profiles, ""); assertThat(profile.getTargetClass(), equalTo((Object) MockHadoopDataSource.class)); assertThat(profile.getAttributes(), is(map("hello1", "world1", "hello2", "world2", "hello3", "world3"))); }
39. TestModuleProperties#testModuleProperties()
Project: apex-core
File: TestModuleProperties.java
@Test public void testModuleProperties() { Configuration conf = new Configuration(false); conf.set(StreamingApplication.DT_PREFIX + "operator.o1.prop.myStringProperty", "myStringPropertyValue"); conf.set(StreamingApplication.DT_PREFIX + "operator.o2.prop.stringArrayField", "a,b,c"); conf.set(StreamingApplication.DT_PREFIX + "operator.o2.prop.mapProperty.key1", "key1Val"); conf.set(StreamingApplication.DT_PREFIX + "operator.o2.prop.mapProperty(key1.dot)", "key1dotVal"); conf.set(StreamingApplication.DT_PREFIX + "operator.o2.prop.mapProperty(key2.dot)", "key2dotVal"); LogicalPlan dag = new LogicalPlan(); TestModules.GenericModule o1 = dag.addModule("o1", new TestModules.GenericModule()); TestModules.ValidationTestModule o2 = dag.addModule("o2", new TestModules.ValidationTestModule()); LogicalPlanConfiguration pb = new LogicalPlanConfiguration(conf); pb.setModuleProperties(dag, "testSetOperatorProperties"); Assert.assertEquals("o1.myStringProperty", "myStringPropertyValue", o1.getMyStringProperty()); Assert.assertArrayEquals("o2.stringArrayField", new String[] { "a", "b", "c" }, o2.getStringArrayField()); Assert.assertEquals("o2.mapProperty.key1", "key1Val", o2.getMapProperty().get("key1")); Assert.assertEquals("o2.mapProperty(key1.dot)", "key1dotVal", o2.getMapProperty().get("key1.dot")); Assert.assertEquals("o2.mapProperty(key2.dot)", "key2dotVal", o2.getMapProperty().get("key2.dot")); }
40. PropertyFilterMap#createConfiguration()
Project: faunus
File: PropertyFilterMap.java
public static Configuration createConfiguration(final Class<? extends Element> klass, final String key, final Compare compare, final Object... values) { final String[] valueStrings = new String[values.length]; Class valueClass = null; for (int i = 0; i < values.length; i++) { valueStrings[i] = (null == values[i]) ? null : values[i].toString(); if (null != values[i]) valueClass = values[i].getClass(); } if (null == valueClass) valueClass = Object.class; final Configuration configuration = new EmptyConfiguration(); configuration.setClass(CLASS, klass, Element.class); configuration.set(KEY, key); configuration.set(COMPARE, compare.name()); configuration.setStrings(VALUES, valueStrings); configuration.setClass(VALUE_CLASS, valueClass, valueClass); return configuration; }
41. TestSaveNamespace#getConf()
Project: hadoop-20
File: TestSaveNamespace.java
private Configuration getConf() throws IOException { String baseDir = System.getProperty("test.build.data", "build/test/data/dfs/"); String nameDirs = baseDir + "name1" + "," + baseDir + "name2"; String editsDirs = baseDir + "edits1" + "," + baseDir + "edits2"; Configuration conf = new Configuration(); FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); conf.set("dfs.http.address", "0.0.0.0:0"); conf.set("dfs.name.dir", nameDirs); conf.set("dfs.name.edits.dir", editsDirs); conf.set("dfs.secondary.http.address", "0.0.0.0:0"); conf.setBoolean("dfs.permissions", false); return conf; }
42. TestProxyUtil#testSendCommand()
Project: hadoop-hdfs
File: TestProxyUtil.java
public void testSendCommand() throws Exception { Configuration conf = new Configuration(false); conf.addResource("ssl-client.xml"); conf.addResource("hdfsproxy-default.xml"); String address = "localhost:" + TEST_PROXY_HTTPS_PORT; conf.set("hdfsproxy.https.address", address); String hostFname = TEST_PROXY_CONF_DIR + "/hdfsproxy-hosts"; conf.set("hdfsproxy.hosts", hostFname); assertTrue(ProxyUtil.sendCommand(conf, "/test/reloadPermFiles")); assertTrue(ProxyUtil.sendCommand(conf, "/test/clearUgiCache")); conf.set("hdfsproxy.https.address", "localhost:0"); assertFalse(ProxyUtil.sendCommand(conf, "/test/reloadPermFiles")); assertFalse(ProxyUtil.sendCommand(conf, "/test/reloadPermFiles")); }
43. TestFileCreation#testServerDefaults()
Project: hadoop-hdfs
File: TestFileCreation.java
/** * Test that server default values can be retrieved on the client side */ public void testServerDefaults() throws IOException { Configuration conf = new Configuration(); conf.setLong("dfs.block.size", FSConstants.DEFAULT_BLOCK_SIZE); conf.setInt("io.bytes.per.checksum", FSConstants.DEFAULT_BYTES_PER_CHECKSUM); conf.setInt("dfs.write.packet.size", FSConstants.DEFAULT_WRITE_PACKET_SIZE); conf.setInt("dfs.replication", FSConstants.DEFAULT_REPLICATION_FACTOR + 1); conf.setInt("io.file.buffer.size", FSConstants.DEFAULT_FILE_BUFFER_SIZE); MiniDFSCluster cluster = new MiniDFSCluster(conf, FSConstants.DEFAULT_REPLICATION_FACTOR + 1, true, null); cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); try { FsServerDefaults serverDefaults = fs.getServerDefaults(); assertEquals(FSConstants.DEFAULT_BLOCK_SIZE, serverDefaults.getBlockSize()); assertEquals(FSConstants.DEFAULT_BYTES_PER_CHECKSUM, serverDefaults.getBytesPerChecksum()); assertEquals(FSConstants.DEFAULT_WRITE_PACKET_SIZE, serverDefaults.getWritePacketSize()); assertEquals(FSConstants.DEFAULT_REPLICATION_FACTOR + 1, serverDefaults.getReplication()); assertEquals(FSConstants.DEFAULT_FILE_BUFFER_SIZE, serverDefaults.getFileBufferSize()); } finally { fs.close(); cluster.shutdown(); } }
44. TestProxyUtil#testSendCommand()
Project: hadoop-common
File: TestProxyUtil.java
public void testSendCommand() throws Exception { Configuration conf = new Configuration(false); conf.addResource("ssl-client.xml"); conf.addResource("hdfsproxy-default.xml"); String address = "localhost:" + TEST_PROXY_HTTPS_PORT; conf.set("hdfsproxy.https.address", address); String hostFname = TEST_PROXY_CONF_DIR + "/hdfsproxy-hosts"; conf.set("hdfsproxy.hosts", hostFname); assertTrue(ProxyUtil.sendCommand(conf, "/test/reloadPermFiles")); assertTrue(ProxyUtil.sendCommand(conf, "/test/clearUgiCache")); conf.set("hdfsproxy.https.address", "localhost:0"); assertFalse(ProxyUtil.sendCommand(conf, "/test/reloadPermFiles")); assertFalse(ProxyUtil.sendCommand(conf, "/test/reloadPermFiles")); }
45. TestAnalyzer#testBasicInputFailureWithoutExit()
Project: tez
File: TestAnalyzer.java
private List<StepCheck[]> testBasicInputFailureWithoutExit() throws Exception { Configuration testConf = new Configuration(false); testConf.setInt(SimpleTestDAG.TEZ_SIMPLE_DAG_NUM_TASKS, 1); testConf.setBoolean(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_DO_FAIL, "v2"), true); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_TASK_INDEX, "v2"), "0"); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_TASK_ATTEMPT, "v2"), "0"); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_INPUT_INDEX, "v2"), "0"); StepCheck[] check = { createStep("v1 : 000000_0", CriticalPathDependency.INIT_DEPENDENCY), createStep("v2 : 000000_0", CriticalPathDependency.DATA_DEPENDENCY), createStep("v1 : 000000_1", CriticalPathDependency.OUTPUT_RECREATE_DEPENDENCY), createStep("v2 : 000000_0", CriticalPathDependency.DATA_DEPENDENCY) }; DAG dag = SimpleTestDAG.createDAG("testBasicInputFailureWithoutExit", testConf); runDAG(dag, DAGStatus.State.SUCCEEDED); return Collections.singletonList(check); }
46. TestFaultTolerance#testBasicInputFailureWithoutExitDeadline()
Project: tez
File: TestFaultTolerance.java
@Test(timeout = 60000) public void testBasicInputFailureWithoutExitDeadline() throws Exception { Configuration testConf = new Configuration(false); // 1 error < 0.4 fail fraction testConf.setInt(SimpleTestDAG.TEZ_SIMPLE_DAG_NUM_TASKS, 3); testConf.setBoolean(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_DO_FAIL, "v2"), true); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_TASK_INDEX, "v2"), "2"); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_TASK_ATTEMPT, "v2"), "0"); testConf.set(TestInput.getVertexConfName(TestInput.TEZ_FAILING_INPUT_FAILING_INPUT_INDEX, "v2"), "0"); DAG dag = SimpleTestDAG.createDAG("testBasicInputFailureWithoutExitDeadline", testConf); runDAGAndVerify(dag, DAGStatus.State.SUCCEEDED); }
47. TestFaultTolerance#testTaskMultipleFailures()
Project: tez
File: TestFaultTolerance.java
@Test(timeout = 60000) public void testTaskMultipleFailures() throws Exception { Configuration testConf = new Configuration(false); testConf.setBoolean(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_DO_FAIL, "v1"), true); testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_TASK_INDEX, "v1"), "0,1"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_UPTO_TASK_ATTEMPT, "v1"), 1); //v1 task0,1 attempt 2 succeed. Input sum = 6. Plus one (v2 attempt0). //ending sum is 7. testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_TASK_INDEX, "v2"), "0"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v2", 0), 7); DAG dag = SimpleTestDAG.createDAG("testTaskMultipleFailures", testConf); runDAGAndVerify(dag, DAGStatus.State.SUCCEEDED, 4); }
48. TestFaultTolerance#testBasicTaskFailure()
Project: tez
File: TestFaultTolerance.java
@Test(timeout = 60000) public void testBasicTaskFailure() throws Exception { Configuration testConf = new Configuration(false); testConf.setBoolean(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_DO_FAIL, "v1"), true); testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_TASK_INDEX, "v1"), "0"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_UPTO_TASK_ATTEMPT, "v1"), 0); //verify value at v2 task1 testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_TASK_INDEX, "v2"), "1"); //value of v2 task1 is 4. //v1 attempt0 has value of 1 (attempt index + 1). //v1 attempt1 has value of 2 (attempt index + 1). //v3 attempt0 verifies value of 1 + 2 (values from input vertices) // + 1 (attempt index + 1) = 4 testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v2", 1), 4); DAG dag = SimpleTestDAG.createDAG("testBasicTaskFailure", testConf); runDAGAndVerify(dag, DAGStatus.State.SUCCEEDED, 1); }
49. TestHRegion#initSplit()
Project: hindex
File: TestHRegion.java
private Configuration initSplit() { Configuration conf = HBaseConfiguration.create(this.conf); // Always compact if there is more than one store file. conf.setInt("hbase.hstore.compactionThreshold", 2); // Make lease timeout longer, lease checks less frequent conf.setInt("hbase.master.lease.thread.wakefrequency", 5 * 1000); conf.setInt(HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, 10 * 1000); // Increase the amount of time between client retries conf.setLong("hbase.client.pause", 15 * 1000); // This size should make it so we always split using the addContent // below. After adding all data, the first region is 1.3M conf.setLong(HConstants.HREGION_MAX_FILESIZE, 1024 * 128); return conf; }
50. TestWALObserver#setupBeforeClass()
Project: hindex
File: TestWALObserver.java
@BeforeClass public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, SampleRegionWALObserver.class.getName()); conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, SampleRegionWALObserver.class.getName()); conf.setBoolean("dfs.support.append", true); conf.setInt("dfs.client.block.recovery.retries", 2); TEST_UTIL.startMiniCluster(1); Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase")); LOG.info("hbase.rootdir=" + hbaseRootDir); conf.set(HConstants.HBASE_DIR, hbaseRootDir.toString()); }
51. TestFaultTolerance#testTaskMultipleFailures()
Project: incubator-tez
File: TestFaultTolerance.java
@Test(timeout = 60000) public void testTaskMultipleFailures() throws Exception { Configuration testConf = new Configuration(false); testConf.setBoolean(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_DO_FAIL, "v1"), true); testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_TASK_INDEX, "v1"), "0,1"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_UPTO_TASK_ATTEMPT, "v1"), 1); //v1 task0,1 attempt 2 succeed. Input sum = 6. Plus one (v2 attempt0). //ending sum is 7. testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_TASK_INDEX, "v2"), "0"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v2", 0), 7); DAG dag = SimpleTestDAG.createDAG("testTaskMultipleFailures", testConf); runDAGAndVerify(dag, DAGStatus.State.SUCCEEDED); }
52. TestFaultTolerance#testBasicTaskFailure()
Project: incubator-tez
File: TestFaultTolerance.java
@Test(timeout = 60000) public void testBasicTaskFailure() throws Exception { Configuration testConf = new Configuration(false); testConf.setBoolean(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_DO_FAIL, "v1"), true); testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_TASK_INDEX, "v1"), "0"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_UPTO_TASK_ATTEMPT, "v1"), 0); //verify value at v2 task1 testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_TASK_INDEX, "v2"), "1"); //value of v2 task1 is 4. //v1 attempt0 has value of 1 (attempt index + 1). //v1 attempt1 has value of 2 (attempt index + 1). //v3 attempt0 verifies value of 1 + 2 (values from input vertices) // + 1 (attempt index + 1) = 4 testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_VERIFY_VALUE, "v2", 1), 4); DAG dag = SimpleTestDAG.createDAG("testBasicTaskFailure", testConf); runDAGAndVerify(dag, DAGStatus.State.SUCCEEDED); }
53. TestUnorderedPartitionedKVWriter#createConfiguration()
Project: incubator-tez
File: TestUnorderedPartitionedKVWriter.java
private Configuration createConfiguration(TezOutputContext outputContext, Class<? extends Writable> keyClass, Class<? extends Writable> valClass, boolean shouldCompress, int maxSingleBufferSizeBytes, Class<? extends Partitioner> partitionerClass) { Configuration conf = new Configuration(false); conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, outputContext.getWorkDirs()); conf.set(TezJobConfig.TEZ_RUNTIME_KEY_CLASS, keyClass.getName()); conf.set(TezJobConfig.TEZ_RUNTIME_VALUE_CLASS, valClass.getName()); conf.set(TezJobConfig.TEZ_RUNTIME_PARTITIONER_CLASS, partitionerClass.getName()); if (maxSingleBufferSizeBytes >= 0) { conf.setInt(TezJobConfig.TEZ_RUNTIME_UNORDERED_OUTPUT_MAX_PER_BUFFER_SIZE_BYTES, maxSingleBufferSizeBytes); } conf.setBoolean(TezJobConfig.TEZ_RUNTIME_COMPRESS, shouldCompress); if (shouldCompress) { conf.set(TezJobConfig.TEZ_RUNTIME_COMPRESS_CODEC, DefaultCodec.class.getName()); } return conf; }
54. IgniteHadoopFileSystemHandshakeSelfTest#configuration()
Project: ignite
File: IgniteHadoopFileSystemHandshakeSelfTest.java
/** * Create configuration for test. * * @param authority Authority. * @return Configuration. */ private static Configuration configuration(String authority, boolean tcp) { Configuration cfg = new Configuration(); cfg.set("fs.defaultFS", "igfs://" + authority + "/"); cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName()); cfg.set("fs.AbstractFileSystem.igfs.impl", IgniteHadoopFileSystem.class.getName()); cfg.setBoolean("fs.igfs.impl.disable.cache", true); if (tcp) cfg.setBoolean(String.format(PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true); else cfg.setBoolean(String.format(PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP, authority), true); cfg.setBoolean(String.format(PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true); return cfg; }
55. HdfsTestUtil#setupClass()
Project: incubator-sentry
File: HdfsTestUtil.java
public static MiniDFSCluster setupClass(String dataDir) throws Exception { File dir = new File(dataDir); new File(dataDir).mkdirs(); savedLocale = Locale.getDefault(); // TODO: we HACK around HADOOP-9643 Locale.setDefault(Locale.ENGLISH); int dataNodes = 2; Configuration conf = new Configuration(); conf.set("dfs.block.access.token.enable", "false"); conf.set("dfs.permissions.enabled", "false"); conf.set("hadoop.security.authentication", "simple"); conf.set("hdfs.minidfs.basedir", dir.getAbsolutePath() + File.separator + "hdfsBaseDir"); conf.set("dfs.namenode.name.dir", dir.getAbsolutePath() + File.separator + "nameNodeNameDir"); System.setProperty("test.build.data", dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "build"); System.setProperty("test.cache.data", dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "cache"); System.setProperty("solr.lock.type", "hdfs"); MiniDFSCluster dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null); return dfsCluster; }
56. TestTezTaskRunner2#testTaskConfUsage()
Project: tez
File: TestTezTaskRunner2.java
@Test(timeout = 5000) public void testTaskConfUsage() throws Exception { Configuration conf = new Configuration(false); conf.set("global", "global1"); conf.set("global_override", "global1"); String[] localDirs = null; Configuration taskConf = new Configuration(false); taskConf.set("global_override", "task1"); taskConf.set("task", "task1"); List<InputSpec> inputSpecList = new ArrayList<>(); List<OutputSpec> outputSpecList = new ArrayList<>(); TaskSpec taskSpec = new TaskSpec("dagName", "vertexName", 1, mock(ProcessorDescriptor.class), inputSpecList, outputSpecList, null, taskConf); TezTaskRunner2 taskRunner2 = new TezTaskRunner2(conf, mock(UserGroupInformation.class), localDirs, taskSpec, 1, null, null, null, mock(TaskReporter.class), null, null, "pid", null, 1000, false, new DefaultHadoopShim()); Assert.assertEquals("global1", taskRunner2.task.getTaskConf().get("global")); Assert.assertEquals("task1", taskRunner2.task.getTaskConf().get("global_override")); Assert.assertEquals("task1", taskRunner2.task.getTaskConf().get("task")); }
57. TestMRHelpers#testTranslateMRConfToTez()
Project: tez
File: TestMRHelpers.java
@Test(timeout = 5000) public void testTranslateMRConfToTez() { Configuration conf = new Configuration(false); conf.setLong(TezRuntimeConfiguration.TEZ_RUNTIME_IO_SORT_MB, 1000); conf.setLong(org.apache.tez.mapreduce.hadoop.MRJobConfig.IO_SORT_MB, 500); Configuration conf1 = new Configuration(conf); MRHelpers.translateMRConfToTez(conf1); Assert.assertNull(conf1.get(org.apache.tez.mapreduce.hadoop.MRJobConfig.IO_SORT_MB)); Assert.assertEquals(1000, conf1.getLong(TezRuntimeConfiguration.TEZ_RUNTIME_IO_SORT_MB, 0)); Configuration conf2 = new Configuration(conf); MRHelpers.translateMRConfToTez(conf2, true); Assert.assertNull(conf2.get(org.apache.tez.mapreduce.hadoop.MRJobConfig.IO_SORT_MB)); Assert.assertEquals(1000, conf2.getLong(TezRuntimeConfiguration.TEZ_RUNTIME_IO_SORT_MB, 0)); Configuration conf3 = new Configuration(conf); MRHelpers.translateMRConfToTez(conf3, false); Assert.assertNull(conf3.get(org.apache.tez.mapreduce.hadoop.MRJobConfig.IO_SORT_MB)); Assert.assertEquals(500, conf3.getLong(TezRuntimeConfiguration.TEZ_RUNTIME_IO_SORT_MB, 0)); }
58. TestXConfiguration#testInjectDefaults()
Project: oozie
File: TestXConfiguration.java
public void testInjectDefaults() throws Exception { Configuration srcConf = new Configuration(false); Configuration targetConf = new Configuration(false); srcConf.set("testParameter1", "valueFromSource"); srcConf.set("testParameter2", "valueFromSource"); targetConf.set("testParameter2", "originalValueFromTarget"); targetConf.set("testParameter3", "originalValueFromTarget"); XConfiguration.injectDefaults(srcConf, targetConf); assertEquals(targetConf.get("testParameter1"), "valueFromSource"); assertEquals(targetConf.get("testParameter2"), "originalValueFromTarget"); assertEquals(targetConf.get("testParameter3"), "originalValueFromTarget"); assertEquals(srcConf.get("testParameter1"), "valueFromSource"); assertEquals(srcConf.get("testParameter2"), "valueFromSource"); assertNull(srcConf.get("testParameter3")); }
59. TestXConfiguration#testCopy()
Project: oozie
File: TestXConfiguration.java
public void testCopy() throws Exception { Configuration srcConf = new Configuration(false); Configuration targetConf = new Configuration(false); srcConf.set("testParameter1", "valueFromSource"); srcConf.set("testParameter2", "valueFromSource"); targetConf.set("testParameter2", "valueFromTarget"); targetConf.set("testParameter3", "valueFromTarget"); XConfiguration.copy(srcConf, targetConf); assertEquals(targetConf.get("testParameter1"), "valueFromSource"); assertEquals(targetConf.get("testParameter2"), "valueFromSource"); assertEquals(targetConf.get("testParameter3"), "valueFromTarget"); }
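The two XConfiguration helpers above differ only in precedence: injectDefaults fills in keys the target has not defined, while copy overwrites the target unconditionally. A small sketch of that contrast, reusing only the Oozie XConfiguration API shown in the tests:

import org.apache.hadoop.conf.Configuration;
import org.apache.oozie.util.XConfiguration;

public class XConfigurationPrecedenceDemo {
    public static void main(String[] args) {
        Configuration src = new Configuration(false);
        Configuration target = new Configuration(false);
        src.set("key", "fromSource");
        target.set("key", "fromTarget");
        XConfiguration.injectDefaults(src, target);
        System.out.println(target.get("key")); // "fromTarget" - existing values win
        XConfiguration.copy(src, target);
        System.out.println(target.get("key")); // "fromSource" - copy overwrites
    }
}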
60. HdfsTestUtil#setupClass()
Project: sentry
File: HdfsTestUtil.java
public static MiniDFSCluster setupClass(String dataDir) throws Exception {
    File dir = new File(dataDir);
    new File(dataDir).mkdirs();
    savedLocale = Locale.getDefault();
    // TODO: we HACK around HADOOP-9643
    Locale.setDefault(Locale.ENGLISH);
    int dataNodes = 2;
    Configuration conf = new Configuration();
    conf.set("dfs.block.access.token.enable", "false");
    conf.set("dfs.permissions.enabled", "false");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hdfs.minidfs.basedir", dir.getAbsolutePath() + File.separator + "hdfsBaseDir");
    conf.set("dfs.namenode.name.dir", dir.getAbsolutePath() + File.separator + "nameNodeNameDir");
    System.setProperty("test.build.data", dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "build");
    System.setProperty("test.cache.data", dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "cache");
    System.setProperty("solr.lock.type", "hdfs");
    MiniDFSCluster dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
    return dfsCluster;
}
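setupClass saves the JVM's default locale and boots a two-node MiniDFSCluster, so a matching teardown has to undo both. A plausible companion sketch (the method name and the System property cleanup are assumptions, not shown in the source):

// Hypothetical companion to setupClass.
public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception {
    if (dfsCluster != null) {
        dfsCluster.shutdown(); // stops the NameNode and both DataNodes
    }
    System.clearProperty("solr.lock.type");
    Locale.setDefault(savedLocale); // restore the locale saved in setupClass
}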
61. TestSubmitMRXCommand#testWFXmlGenerationNegative1()
Project: oozie
File: TestSubmitMRXCommand.java
public void testWFXmlGenerationNegative1() throws Exception {
    Configuration conf = new Configuration();
    conf.set(XOozieClient.JT, "jobtracker");
    conf.set(XOozieClient.NN, "namenode");
    // conf.set(XOozieClient.LIBPATH, "libpath");
    conf.set("name_a", "value_a");
    conf.set("name_b", "value_b");
    conf.set("name_c", "value_c");
    SubmitMRXCommand submitMRCmd = new SubmitMRXCommand(conf, "token");
    try {
        submitMRCmd.getWorkflowXml(conf);
        fail("should have already failed - missing libpath def");
    } catch (Exception e) {
    }
}
62. TestSubmitMRCommand#testWFXmlGenerationWithoutLibPath()
Project: oozie
File: TestSubmitMRCommand.java
public void testWFXmlGenerationWithoutLibPath() throws Exception {
    Configuration conf = new Configuration();
    conf.set(XOozieClient.JT, "jobtracker");
    conf.set(XOozieClient.NN, "namenode");
    // conf.set(XOozieClient.LIBPATH, "libpath");
    conf.set("name_a", "value_a");
    conf.set("name_b", "value_b");
    conf.set("name_c", "value_c");
    SubmitMRCommand submitMRCmd = new SubmitMRCommand(conf, "token");
    submitMRCmd.getWorkflowXml(conf);
}
63. TestOozieCLI#createConfigFile()
Project: oozie
File: TestOozieCLI.java
private String createConfigFile(String appPath) throws Exception { String path = getTestCaseDir() + "/" + getName() + ".xml"; Configuration conf = new Configuration(false); conf.set(OozieClient.USER_NAME, getTestUser()); conf.set(OozieClient.GROUP_NAME, getTestGroup()); conf.set(OozieClient.APP_PATH, appPath); conf.set(OozieClient.RERUN_SKIP_NODES, "node"); injectKerberosInfo(conf); OutputStream os = new FileOutputStream(path); conf.writeXml(os); os.close(); return path; }
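writeXml serializes every explicitly set property as a standard Hadoop <configuration> XML document, which addResource can later read back. A round-trip sketch (the file path and key are illustrative):

import java.io.FileOutputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class ConfigRoundTripDemo {
    public static void main(String[] args) throws Exception {
        Configuration out = new Configuration(false);
        out.set("oozie.demo.key", "demoValue"); // illustrative key
        try (OutputStream os = new FileOutputStream("/tmp/job-config.xml")) {
            out.writeXml(os); // <configuration><property>...</property></configuration>
        }
        Configuration in = new Configuration(false);
        in.addResource(new Path("/tmp/job-config.xml"));
        System.out.println(in.get("oozie.demo.key")); // prints "demoValue"
    }
}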
64. TestClassLoading#setUpBeforeClass()
Project: hindex
File: TestClassLoading.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    // regionCoprocessor1 will be loaded on all regionservers, since it is
    // loaded for any tables (user or meta).
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, regionCoprocessor1.getName());
    // regionCoprocessor2 will be loaded only on regionservers that serve a
    // user table region. Therefore, if there are no user tables loaded,
    // this coprocessor will not be loaded on any regionserver.
    conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, regionCoprocessor2.getName());
    conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, regionServerCoprocessor.getName());
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, masterCoprocessor.getName());
    TEST_UTIL.startMiniCluster(1);
    cluster = TEST_UTIL.getDFSCluster();
}
65. AccumuloDocIndexerTest#init()
Project: incubator-rya
File: AccumuloDocIndexerTest.java
@Before public void init() throws Exception { accCon = new MockInstance("instance").getConnector("root", "".getBytes()); if (accCon.tableOperations().exists(tableName)) { accCon.tableOperations().delete(tableName); } accCon.tableOperations().create(tableName); Configuration config = new Configuration(); config.set(ConfigUtils.CLOUDBASE_AUTHS, "U"); config.set(ConfigUtils.CLOUDBASE_INSTANCE, "instance"); config.set(ConfigUtils.CLOUDBASE_USER, "root"); config.set(ConfigUtils.CLOUDBASE_PASSWORD, ""); conf = new AccumuloRdfConfiguration(config); conf.set(ConfigUtils.USE_MOCK_INSTANCE, "true"); conf.setAdditionalIndexers(EntityCentricIndex.class); }
66. MongoDBRyaDAOTest#setUp()
Project: incubator-rya
File: MongoDBRyaDAOTest.java
@Before public void setUp() throws IOException, RyaDAOException { testsFactory = MongodForTestsFactory.with(Version.Main.PRODUCTION); Configuration conf = new Configuration(); conf.set(MongoDBRdfConfiguration.USE_TEST_MONGO, "true"); conf.set(MongoDBRdfConfiguration.MONGO_DB_NAME, "test"); conf.set(MongoDBRdfConfiguration.MONGO_COLLECTION_PREFIX, "rya_"); conf.set(RdfCloudTripleStoreConfiguration.CONF_TBL_PREFIX, "rya_"); configuration = new MongoDBRdfConfiguration(conf); mongoClient = testsFactory.newMongo(); int port = mongoClient.getServerAddressList().get(0).getPort(); configuration.set(MongoDBRdfConfiguration.MONGO_INSTANCE_PORT, Integer.toString(port)); dao = new MongoDBRyaDAO(configuration, mongoClient); }
67. IgniteHadoopFileSystemAbstractSelfTest#configuration()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** * Create configuration for test. * * @param authority Authority. * @param skipEmbed Whether to skip embedded mode. * @param skipLocShmem Whether to skip local shmem mode. * @return Configuration. */ private static Configuration configuration(String authority, boolean skipEmbed, boolean skipLocShmem) { Configuration cfg = new Configuration(); cfg.set("fs.defaultFS", "igfs://" + authority + "/"); cfg.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName()); cfg.set("fs.AbstractFileSystem.igfs.impl", org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.class.getName()); cfg.setBoolean("fs.igfs.impl.disable.cache", true); if (skipEmbed) cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true); if (skipLocShmem) cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true); return cfg; }
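Because the helper sets fs.defaultFS to the igfs:// authority, a plain FileSystem.get(conf) afterwards resolves to the Ignite file system. A consumption sketch under that assumption; the endpoint is illustrative, and the v1 package name for IgniteHadoopFileSystem is an inference from the example above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IgfsClientDemo {
    public static void main(String[] args) throws Exception {
        Configuration cfg = new Configuration();
        cfg.set("fs.defaultFS", "igfs://igfs@127.0.0.1:10500/"); // illustrative endpoint
        cfg.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");
        FileSystem fs = FileSystem.get(cfg); // resolves fs.defaultFS to the IGFS implementation
        System.out.println(fs.exists(new Path("/tmp"))); // ordinary FileSystem API from here on
    }
}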
68. TestFileSerdeFormatter#testTextFileWithZipFormatter()
Project: lens
File: TestFileSerdeFormatter.java
/**
 * Test text file with zip formatter.
 *
 * @throws IOException Signals that an I/O exception has occurred.
 */
@Test
public void testTextFileWithZipFormatter() throws IOException {
    Configuration conf = new Configuration();
    setConf(conf);
    conf.set(LensConfConstants.QUERY_OUTPUT_FILE_EXTN, ".txt");
    conf.set(LensConfConstants.QUERY_OUTPUT_SERDE, LazySimpleSerDe.class.getCanonicalName());
    conf.setBoolean(LensConfConstants.RESULT_SPLIT_INTO_MULTIPLE, true);
    conf.setLong(LensConfConstants.RESULT_SPLIT_MULTIPLE_MAX_ROWS, 2L);
    testFormatter(conf, "UTF8", LensConfConstants.RESULT_SET_PARENT_DIR_DEFAULT, ".zip", getMockedResultSetWithoutComma());
    // validate rows
    List<String> actual = readZipOutputFile(new Path(formatter.getFinalOutputPath()), conf, "UTF-8");
    Assert.assertEquals(actual, getExpectedTextRowsWithMultipleWithoutComma());
}
69. TestFilePersistentFormatter#testTextFilesWithCompression()
Project: lens
File: TestFilePersistentFormatter.java
/**
 * Test text files with compression.
 *
 * @throws IOException Signals that an I/O exception has occurred.
 */
@Test
public void testTextFilesWithCompression() throws IOException {
    Configuration conf = new Configuration();
    setConf(conf);
    conf.set("test.partfile.dir", partFileTextDir.toString());
    conf.set(LensConfConstants.QUERY_OUTPUT_FILE_EXTN, ".txt");
    conf.setBoolean(LensConfConstants.QUERY_OUTPUT_ENABLE_COMPRESSION, true);
    conf.set(LensConfConstants.QUERY_OUTPUT_HEADER, "firstcolformat(secondcol,2)thirdcolfourthcolfifthcolsixthcolseventhcol");
    testFormatter(conf, "UTF8", LensConfConstants.RESULT_SET_PARENT_DIR_DEFAULT, ".txt.gz", getMockedResultSetWithoutComma());
    // validate rows
    Assert.assertEquals(readCompressedFile(new Path(formatter.getFinalOutputPath()), conf, "UTF-8"), getExpectedTextRows());
}
70. TestFilePersistentFormatter#testTextFileWithSerdeHeader()
Project: lens
File: TestFilePersistentFormatter.java
/**
 * Test text file with serde header.
 *
 * @throws IOException Signals that an I/O exception has occurred.
 */
@Test
public void testTextFileWithSerdeHeader() throws IOException {
    Configuration conf = new Configuration();
    setConf(conf);
    conf.set("test.partfile.dir", partFileTextDir.toString());
    conf.set(LensConfConstants.QUERY_OUTPUT_FILE_EXTN, ".txt");
    conf.set(LensConfConstants.QUERY_OUTPUT_HEADER, "");
    conf.set(LensConfConstants.QUERY_OUTPUT_SERDE, LazySimpleSerDe.class.getCanonicalName());
    testFormatter(conf, "UTF8", LensConfConstants.RESULT_SET_PARENT_DIR_DEFAULT, ".txt", getMockedResultSetWithoutComma());
    // validate rows
    Assert.assertEquals(readFinalOutputFile(new Path(formatter.getFinalOutputPath()), conf, "UTF-8"), getExpectedTextRowsWithoutComma());
}
71. TestSubmitXCommand#testSubmitReservedVars()
Project: oozie
File: TestSubmitXCommand.java
public void testSubmitReservedVars() throws Exception { Configuration conf = new XConfiguration(); String appPath = getTestCaseDir(); String appXml = "<workflow-app xmlns='uri:oozie:workflow:0.1' name='map-reduce-wf'> " + "<start to='end' /> " + "<end name='end' /> " + "</workflow-app>"; writeToFile(appXml, appPath + "/workflow.xml"); conf.set(OozieClient.APP_PATH, appPath + "/workflow.xml"); conf.set(OozieClient.USER_NAME, getTestUser()); conf.set(OozieClient.GROUP_NAME, "other"); conf.set("GB", "5"); SubmitXCommand sc = new SubmitXCommand(conf, "UNIT_TESTING"); try { sc.call(); fail("WF job submission should fail with reserved variable definitions."); } catch (CommandException ce) { } }
72. TestSubmitCommand#testSubmitReservedVars()
Project: oozie
File: TestSubmitCommand.java
public void testSubmitReservedVars() throws Exception { Configuration conf = new XConfiguration(); String appPath = getTestCaseDir(); String appXml = "<workflow-app xmlns='uri:oozie:workflow:0.1' name='map-reduce-wf'> " + "<start to='end' /> " + "<end name='end' /> " + "</workflow-app>"; writeToFile(appXml, appPath + "/workflow.xml"); conf.set(OozieClient.APP_PATH, appPath + "/workflow.xml"); conf.set(OozieClient.USER_NAME, getTestUser()); conf.set(OozieClient.GROUP_NAME, "other"); conf.set("GB", "5"); SubmitCommand sc = new SubmitCommand(conf, "UNIT_TESTING"); try { sc.call(); fail("WF job submission should fail with reserved variable definitions."); } catch (CommandException ce) { } }
73. TestCoordSubmitXCommand#testSubmitReservedVars()
Project: oozie
File: TestCoordSubmitXCommand.java
/** * Don't include controls in XML. * * @throws Exception */ public void testSubmitReservedVars() throws Exception { Configuration conf = new XConfiguration(); String appPath = getTestCaseDir() + File.separator + "coordinator.xml"; String appXml = "<coordinator-app name=\"NAME\" frequency=\"10\" start=\"2009-02-01T01:00Z\" end=\"2009-02-03T23:59Z\" timezone=\"UTC\" " + "xmlns=\"uri:oozie:coordinator:0.2\"> " + "<action> <workflow> <app-path>hdfs:///tmp/workflows/</app-path> " + "<configuration> <property> <name>inputA</name> <value>blah</value> </property> " + "</configuration> </workflow> </action> </coordinator-app>"; writeToFile(appXml, appPath); conf.set(OozieClient.COORDINATOR_APP_PATH, appPath); conf.set(OozieClient.USER_NAME, getTestUser()); conf.set(OozieClient.GROUP_NAME, "other"); conf.set("MINUTES", "1"); CoordSubmitXCommand sc = new CoordSubmitXCommand(conf, "UNIT_TESTING"); try { sc.call(); fail("Coord job submission should fail with reserved variable definitions."); } catch (CommandException ce) { } }
74. TestCoordSubmitCommand#testSubmitReservedVars()
Project: oozie
File: TestCoordSubmitCommand.java
/** * Don't include controls in XML. * * @throws Exception */ public void testSubmitReservedVars() throws Exception { Configuration conf = new XConfiguration(); String appPath = getTestCaseDir() + File.separator + "coordinator.xml"; String appXml = "<coordinator-app name=\"NAME\" frequency=\"10\" start=\"2009-02-01T01:00Z\" end=\"2009-02-03T23:59Z\" timezone=\"UTC\" " + "xmlns=\"uri:oozie:coordinator:0.1\"> " + "<action> <workflow> <app-path>hdfs:///tmp/workflows/</app-path> " + "<configuration> <property> <name>inputA</name> <value>blah</value> </property> " + "</configuration> </workflow> </action> </coordinator-app>"; writeToFile(appXml, appPath); conf.set(OozieClient.COORDINATOR_APP_PATH, appPath); conf.set(OozieClient.USER_NAME, getTestUser()); conf.set(OozieClient.GROUP_NAME, "other"); conf.set("MINUTES", "1"); injectKerberosInfo(conf); CoordSubmitCommand sc = new CoordSubmitCommand(conf, "UNIT_TESTING"); try { sc.call(); fail("Coord job submission should fail with reserved variable definitions."); } catch (CommandException ce) { } }
75. TestJavaActionExecutor#addRecordToWfJobTable()
Project: oozie
File: TestJavaActionExecutor.java
private WorkflowJobBean addRecordToWfJobTable(String wfId, String wfxml) throws Exception { WorkflowApp app = new LiteWorkflowApp("testApp", wfxml, new StartNodeDef("start")).addNode(new EndNodeDef("end")); Configuration conf = new Configuration(); conf.set(OozieClient.APP_PATH, "testPath"); conf.set(OozieClient.LOG_TOKEN, "testToken"); conf.set(OozieClient.USER_NAME, getTestUser()); conf.set(OozieClient.GROUP_NAME, getTestGroup()); injectKerberosInfo(conf); WorkflowJobBean wfBean = createWorkflow(app, conf, "auth"); wfBean.setId(wfId); wfBean.setStatus(WorkflowJob.Status.SUCCEEDED); WorkflowActionBean action = new WorkflowActionBean(); action.setName("test"); action.setCred("null"); action.setId(Services.get().get(UUIDService.class).generateChildId(wfBean.getId(), "test")); wfBean.getActions().add(action); return wfBean; }
76. TestPipelinedSorter#testWithPipelinedShuffle()
Project: tez
File: TestPipelinedSorter.java
@Test
public void testWithPipelinedShuffle() throws IOException {
    this.numOutputs = 1;
    this.initialAvailableMem = 5 * 1024 * 1024;
    Configuration conf = getConf();
    conf.setInt(TezRuntimeConfiguration.TEZ_RUNTIME_IO_SORT_MB, 5);
    conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT, false);
    conf.setInt(TezRuntimeConfiguration.TEZ_RUNTIME_PIPELINED_SORTER_MIN_BLOCK_SIZE_IN_MB, 1);
    PipelinedSorter sorter = new PipelinedSorter(this.outputContext, conf, numOutputs, initialAvailableMem);
    // Write 100 keys each of size 10
    writeData(sorter, 10000, 100);
    // final merge is disabled. Final output file would not be populated in this case.
    assertTrue(sorter.finalOutputFile == null);
    conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT, true);
    verify(outputContext, times(1)).sendEvents(anyListOf(Event.class));
}
77. TestTaskSpecificLaunchCmdOption#testTaskSpecificLogOptions()
Project: tez
File: TestTaskSpecificLaunchCmdOption.java
@Test(timeout = 5000) public void testTaskSpecificLogOptions() { Configuration conf = new Configuration(false); conf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LAUNCH_CMD_OPTS_LIST, "v1[0,2,5]"); TaskSpecificLaunchCmdOption options; conf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LOG_LEVEL, "DEBUG;org.apache.tez=INFO"); options = new TaskSpecificLaunchCmdOption(conf); assertTrue(options.hasModifiedLogProperties()); assertFalse(options.hasModifiedTaskLaunchOpts()); assertEquals(2, options.getTaskSpecificLogParams().length); conf.unset(TezConfiguration.TEZ_TASK_SPECIFIC_LOG_LEVEL); options = new TaskSpecificLaunchCmdOption(conf); assertFalse(options.hasModifiedLogProperties()); assertFalse(options.hasModifiedTaskLaunchOpts()); conf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LOG_LEVEL, "DEBUG"); options = new TaskSpecificLaunchCmdOption(conf); assertTrue(options.hasModifiedLogProperties()); assertFalse(options.hasModifiedTaskLaunchOpts()); assertEquals(1, options.getTaskSpecificLogParams().length); }
78. TestAnalyzer#testAttemptOfDownstreamVertexConnectedWithTwoUpstreamVerticesFailure()
Project: tez
File: TestAnalyzer.java
/**
 * Downstream(v3) attempt failure of a vertex connected with
 * 2 upstream vertices.
 *    v1  v2
 *     \  /
 *      v3
 *
 * @throws Exception
 */
private List<StepCheck[]> testAttemptOfDownstreamVertexConnectedWithTwoUpstreamVerticesFailure() throws Exception {
    Configuration testConf = new Configuration(false);
    testConf.setInt(SimpleVTestDAG.TEZ_SIMPLE_V_DAG_NUM_TASKS, 1);
    testConf.setBoolean(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_DO_FAIL, "v3"), true);
    testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_TASK_INDEX, "v3"), "0");
    testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_UPTO_TASK_ATTEMPT, "v3"), 1);
    StepCheck[] check = {
        // use regex for either vertices being possible on the path
        createStep("v[12] : 000000_0", CriticalPathDependency.INIT_DEPENDENCY),
        createStep("v3 : 000000_0", CriticalPathDependency.DATA_DEPENDENCY),
        createStep("v3 : 000000_1", CriticalPathDependency.RETRY_DEPENDENCY),
        createStep("v3 : 000000_2", CriticalPathDependency.RETRY_DEPENDENCY)
    };
    DAG dag = SimpleVTestDAG.createDAG("testAttemptOfDownstreamVertexConnectedWithTwoUpstreamVerticesFailure", testConf);
    runDAG(dag, DAGStatus.State.SUCCEEDED);
    return Collections.singletonList(check);
}
79. TestAnalyzer#testTaskMultipleFailures()
Project: tez
File: TestAnalyzer.java
private List<StepCheck[]> testTaskMultipleFailures() throws Exception { Configuration testConf = new Configuration(false); testConf.setInt(SimpleTestDAG.TEZ_SIMPLE_DAG_NUM_TASKS, 1); testConf.setBoolean(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_DO_FAIL, "v1"), true); testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_TASK_INDEX, "v1"), "0"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_UPTO_TASK_ATTEMPT, "v1"), 1); StepCheck[] check = { createStep("v1 : 000000_0", CriticalPathDependency.INIT_DEPENDENCY), createStep("v1 : 000000_1", CriticalPathDependency.RETRY_DEPENDENCY), createStep("v1 : 000000_2", CriticalPathDependency.RETRY_DEPENDENCY), createStep("v2 : 000000_0", CriticalPathDependency.DATA_DEPENDENCY) }; DAG dag = SimpleTestDAG.createDAG("testTaskMultipleFailures", testConf); runDAG(dag, DAGStatus.State.SUCCEEDED); return Collections.singletonList(check); }
80. TestAnalyzer#testBasicTaskFailure()
Project: tez
File: TestAnalyzer.java
private List<StepCheck[]> testBasicTaskFailure() throws Exception { Configuration testConf = new Configuration(false); testConf.setInt(SimpleTestDAG.TEZ_SIMPLE_DAG_NUM_TASKS, 1); testConf.setBoolean(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_DO_FAIL, "v1"), true); testConf.set(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_TASK_INDEX, "v1"), "0"); testConf.setInt(TestProcessor.getVertexConfName(TestProcessor.TEZ_FAILING_PROCESSOR_FAILING_UPTO_TASK_ATTEMPT, "v1"), 0); StepCheck[] check = { createStep("v1 : 000000_0", CriticalPathDependency.INIT_DEPENDENCY), createStep("v1 : 000000_1", CriticalPathDependency.RETRY_DEPENDENCY), createStep("v2 : 000000_0", CriticalPathDependency.DATA_DEPENDENCY) }; DAG dag = SimpleTestDAG.createDAG("testBasicTaskFailure", testConf); runDAG(dag, DAGStatus.State.SUCCEEDED); return Collections.singletonList(check); }
81. StramClientUtilsTest#testEvalConfiguration()
Project: apex-core
File: StramClientUtilsTest.java
@Test public void testEvalConfiguration() throws Exception { Configuration conf = new Configuration(); conf.set("a.b.c", "123"); conf.set("x.y.z", "foobar"); conf.set("sub.result", "1111 ${a.b.c} xxx ${x.y.z} yyy"); conf.set("script.result", "1111 {% (_prop[\"a.b.c\"] * _prop[\"a.b.c\"]).toFixed(0) %} xxx"); StramClientUtils.evalConfiguration(conf); Assert.assertEquals("1111 123 xxx foobar yyy", conf.get("sub.result")); Assert.assertEquals("1111 15129 xxx", conf.get("script.result")); }
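The ${a.b.c} substitution in the first assertion is stock Configuration behavior: get() expands ${...} references against other keys (and system properties) at read time, while getRaw() returns the stored string. The {% ... %} script block is the apex-core extension that evalConfiguration resolves eagerly. A sketch of the built-in part only:

import org.apache.hadoop.conf.Configuration;

public class VariableExpansionDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("a.b.c", "123");
        conf.set("sub.result", "value=${a.b.c}");
        System.out.println(conf.get("sub.result"));    // "value=123", expanded on read
        System.out.println(conf.getRaw("sub.result")); // "value=${a.b.c}", stored form
    }
}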
82. EventIncrementerAppTest#testEventIncrementerApp()
Project: apex-malhar
File: EventIncrementerAppTest.java
@Test public void testEventIncrementerApp() throws FileNotFoundException, IOException { Logger logger = LoggerFactory.getLogger(EventIncrementerAppTest.class); LocalMode lm = LocalMode.newInstance(); Configuration conf = new Configuration(); InputStream is = new FileInputStream("src/site/conf/dt-site-testbench.xml"); conf.addResource(is); conf.get("dt.application.EventIncrementerApp.operator.hmapOper.seed"); conf.get("dt.application.EventIncrementerApp.operator.hmapOper.keys"); conf.get("dt.application.EventIncrementerApp.operator.hmapOper.numKeys"); try { lm.prepareDAG(new EventIncrementerApp(), conf); LocalMode.Controller lc = lm.getController(); lc.run(20000); } catch (Exception ex) { logger.info(ex.getMessage()); } is.close(); }
83. EventGeneratorAppTest#testEventGeneratorApp()
Project: apex-malhar
File: EventGeneratorAppTest.java
@Test public void testEventGeneratorApp() throws FileNotFoundException, IOException { Logger logger = LoggerFactory.getLogger(EventGeneratorAppTest.class); LocalMode lm = LocalMode.newInstance(); Configuration conf = new Configuration(); InputStream is = new FileInputStream("src/site/conf/dt-site-testbench.xml"); conf.addResource(is); conf.get("dt.application.EventGeneratorApp.operator.eventGenerator.keysHelper"); conf.get("dt.application.EventGeneratorApp.operator.eventGenerator.weightsHelper"); conf.get("dt.application.EventGeneratorApp.operator.eventGenerator.valuesHelper"); try { lm.prepareDAG(new EventGeneratorApp(), conf); LocalMode.Controller lc = lm.getController(); lc.run(20000); } catch (Exception ex) { logger.info(ex.getMessage()); } is.close(); }
84. EventClassifierNumberToHashDoubleAppTest#testEventClassifierNumberToHashDoubleApp()
Project: apex-malhar
File: EventClassifierNumberToHashDoubleAppTest.java
@Test public void testEventClassifierNumberToHashDoubleApp() throws FileNotFoundException, IOException { Logger logger = LoggerFactory.getLogger(EventClassifierNumberToHashDoubleAppTest.class); LocalMode lm = LocalMode.newInstance(); Configuration conf = new Configuration(); InputStream is = new FileInputStream("src/site/conf/dt-site-testbench.xml"); conf.addResource(is); conf.get("dt.application.EventClassifierNumberToHashDoubleApp.operator.eventClassify.key_keys"); conf.get("dt.application.EventClassifierNumberToHashDoubleApp.operator.eventClassify.s_start"); conf.get("dt.application.EventClassifierNumberToHashDoubleApp.operator.eventClassify.s_end"); try { lm.prepareDAG(new EventClassifierNumberToHashDoubleApp(), conf); LocalMode.Controller lc = lm.getController(); lc.run(20000); } catch (Exception ex) { logger.info(ex.getMessage()); } is.close(); }
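The three testbench tests above all follow the same pattern: layer an application-specific XML file over the defaults with Configuration.addResource(InputStream), then read a few keys. One detail worth noting is that resources are parsed lazily, on first property access, which is why the tests close the stream only at the end. A stripped-down sketch using the same file and key:

import java.io.FileInputStream;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;

public class AddResourceDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (InputStream is = new FileInputStream("src/site/conf/dt-site-testbench.xml")) {
            conf.addResource(is);
            // The first get() triggers the actual parse, so the stream must still be open here.
            System.out.println(conf.get("dt.application.EventIncrementerApp.operator.hmapOper.seed"));
        }
    }
}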
85. TestCrcCorruption#testCrcCorruption()
Project: hadoop-common
File: TestCrcCorruption.java
public void testCrcCorruption() throws Exception {
    //
    // default parameters
    //
    System.out.println("TestCrcCorruption with default parameters");
    Configuration conf1 = new Configuration();
    conf1.setInt("dfs.blockreport.intervalMsec", 3 * 1000);
    DFSTestUtil util1 = new DFSTestUtil("TestCrcCorruption", 40, 3, 8 * 1024);
    thistest(conf1, util1);
    //
    // specific parameters
    //
    System.out.println("TestCrcCorruption with specific parameters");
    Configuration conf2 = new Configuration();
    conf2.setInt("io.bytes.per.checksum", 17);
    conf2.setInt("dfs.block.size", 34);
    DFSTestUtil util2 = new DFSTestUtil("TestCrcCorruption", 40, 3, 400);
    thistest(conf2, util2);
}
86. TestLookasideCache#getCachedHdfs()
Project: hadoop-20
File: TestLookasideCache.java
/** * Returns a cached filesystem layered on top of the HDFS cluster */ private LookasideCacheFileSystem getCachedHdfs(FileSystem fileSys, Configuration conf, long cacheSize) throws IOException { DistributedFileSystem dfs = (DistributedFileSystem) fileSys; Configuration clientConf = new Configuration(conf); clientConf.setLong(LookasideCache.CACHESIZE, cacheSize); clientConf.set("fs.lookasidecache.dir", TEST_DIR); clientConf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.LookasideCacheFileSystem"); clientConf.set("fs.lookasidecache.underlyingfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem"); URI dfsUri = dfs.getUri(); FileSystem.closeAll(); FileSystem lfs = FileSystem.get(dfsUri, clientConf); assertTrue("lfs not an instance of LookasideCacheFileSystem", lfs instanceof LookasideCacheFileSystem); return (LookasideCacheFileSystem) lfs; }
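The new Configuration(conf) copy constructor used here clones the underlying property map, so the client-side overrides (cache size, the wrapper filesystem impl) never leak back into the conf shared with the cluster. A minimal sketch:

import org.apache.hadoop.conf.Configuration;

public class CopyConstructorDemo {
    public static void main(String[] args) {
        Configuration base = new Configuration(false);
        base.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        Configuration clientConf = new Configuration(base); // copies the properties
        clientConf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.LookasideCacheFileSystem");
        System.out.println(base.get("fs.hdfs.impl"));       // still DistributedFileSystem
        System.out.println(clientConf.get("fs.hdfs.impl")); // the override, isolated to the copy
    }
}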
87. MultipleOutputs#addNamedOutput()
Project: hadoop-mapreduce
File: MultipleOutputs.java
/** * Adds a named output for the job. * <p/> * * @param job job to add the named output * @param namedOutput named output name, it has to be a word, letters * and numbers only, cannot be the word 'part' as * that is reserved for the default output. * @param outputFormatClass OutputFormat class. * @param keyClass key class * @param valueClass value class */ @SuppressWarnings("unchecked") public static void addNamedOutput(Job job, String namedOutput, Class<? extends OutputFormat> outputFormatClass, Class<?> keyClass, Class<?> valueClass) { checkNamedOutputName(job, namedOutput, true); Configuration conf = job.getConfiguration(); conf.set(MULTIPLE_OUTPUTS, conf.get(MULTIPLE_OUTPUTS, "") + " " + namedOutput); conf.setClass(MO_PREFIX + namedOutput + FORMAT, outputFormatClass, OutputFormat.class); conf.setClass(MO_PREFIX + namedOutput + KEY, keyClass, Object.class); conf.setClass(MO_PREFIX + namedOutput + VALUE, valueClass, Object.class); }
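addNamedOutput only records the format, key, and value classes in the job Configuration; records reach the named output through a MultipleOutputs instance inside a task. A typical usage sketch, assuming the new-API org.apache.hadoop.mapreduce.lib.output.MultipleOutputs shown above:

// Driver side: declare the named output before submitting the job.
MultipleOutputs.addNamedOutput(job, "errors", TextOutputFormat.class, Text.class, LongWritable.class);

// Task side, e.g. in a Reducer:
public static class DemoReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    private MultipleOutputs<Text, LongWritable> mos;

    @Override
    protected void setup(Context context) {
        mos = new MultipleOutputs<Text, LongWritable>(context);
    }

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        for (LongWritable v : values) {
            mos.write("errors", key, v); // routed to the "errors" named output
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        mos.close(); // flushes the named-output record writers
    }
}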
88. DistSum#createJob()
Project: hadoop-mapreduce
File: DistSum.java
/** Create a job */
private Job createJob(String name, Summation sigma) throws IOException {
    final Job job = new Job(getConf(), parameters.remoteDir + "/" + name);
    final Configuration jobconf = job.getConfiguration();
    job.setJarByClass(DistSum.class);
    jobconf.setInt(N_PARTS, parameters.nParts);
    SummationWritable.write(sigma, DistSum.class, jobconf);
    // disable task timeout
    jobconf.setLong(JobContext.TASK_TIMEOUT, 0);
    // do not use speculative execution
    jobconf.setBoolean(JobContext.MAP_SPECULATIVE, false);
    jobconf.setBoolean(JobContext.REDUCE_SPECULATIVE, false);
    return job;
}
89. TestGridmixSubmission#initCluster()
Project: hadoop-mapreduce
File: TestGridmixSubmission.java
@BeforeClass public static void initCluster() throws IOException { Configuration conf = new Configuration(); conf.setBoolean(JTConfig.JT_RETIREJOBS, false); conf.setInt(JTConfig.JT_RETIREJOB_CACHE_SIZE, 1000); conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, true); conf.setInt(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, 1); dfsCluster = new MiniDFSCluster(conf, 3, true, null); dfs = dfsCluster.getFileSystem(); mrCluster = new MiniMRCluster(3, dfs.getUri().toString(), 1, null, null, new JobConf(conf)); }
90. TestCrcCorruption#testCrcCorruption()
Project: hadoop-hdfs
File: TestCrcCorruption.java
public void testCrcCorruption() throws Exception {
    //
    // default parameters
    //
    System.out.println("TestCrcCorruption with default parameters");
    Configuration conf1 = new Configuration();
    conf1.setInt("dfs.blockreport.intervalMsec", 3 * 1000);
    DFSTestUtil util1 = new DFSTestUtil("TestCrcCorruption", 40, 3, 8 * 1024);
    thistest(conf1, util1);
    //
    // specific parameters
    //
    System.out.println("TestCrcCorruption with specific parameters");
    Configuration conf2 = new Configuration();
    conf2.setInt("io.bytes.per.checksum", 17);
    conf2.setInt("dfs.block.size", 34);
    DFSTestUtil util2 = new DFSTestUtil("TestCrcCorruption", 40, 3, 400);
    thistest(conf2, util2);
}
91. BlockIndexedFileInputFormat#setOptions()
Project: elephant-twin
File: BlockIndexedFileInputFormat.java
/**Set values for the underlying inputformat class. * We allow indexing job to have filters for debugging/testing purposes. * Thus relying on whether there is a filter to determine * if it is a indexing or searching job is not sufficient anymore. * @param job * @param inputformatClass * @param valueClass * @param indexDir * @param filterConditions * @param indexColumn * @param indexingFlag: true for indexing jobs; false for searching jobs. */ private static void setOptions(Job job, String inputformatClass, String valueClass, String indexDir, String filterConditions, String indexColumn, boolean indexingFlag) { Configuration conf = job.getConfiguration(); conf.set(REALINPUTFORMAT, inputformatClass); conf.set(VALUECLASS, valueClass); conf.set(INDEXDIR, indexDir); if (filterConditions != null) conf.set(FILTERCONDITIONS, filterConditions); if (indexColumn != null) conf.set(COLUMNNAME, indexColumn); conf.setBoolean(INDEXINGJOBFLAG, indexingFlag); }
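setOptions guards the optional keys with null checks before calling conf.set, which matters because Configuration rejects null values (recent versions throw an IllegalArgumentException). On the reading side the usual idiom is to supply a default instead. A sketch of both halves, with illustrative key names:

import org.apache.hadoop.conf.Configuration;

public class OptionalKeyDemo {
    static void setOptional(Configuration conf, String filterConditions) {
        if (filterConditions != null) {
            conf.set("demo.filter.conditions", filterConditions); // only set when present
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        setOptional(conf, null);
        // get with a default avoids null handling at the call site
        System.out.println(conf.get("demo.filter.conditions", "<none>"));
    }
}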
92. AbstractMRNewApiSearchTest#createConf()
Project: elasticsearch-hadoop
File: AbstractMRNewApiSearchTest.java
private Configuration createConf() throws IOException {
    Configuration conf = HdpBootstrap.hadoopConfig();
    HadoopCfgUtils.setGenericOptions(conf);
    Job job = new Job(conf);
    job.setInputFormatClass(EsInputFormat.class);
    job.setOutputFormatClass(PrintStreamOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    boolean type = random.nextBoolean();
    Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);
    job.setOutputValueClass(mapType);
    conf.set(ConfigurationOptions.ES_QUERY, query);
    conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(readMetadata));
    conf.set(ConfigurationOptions.ES_OUTPUT_JSON, String.valueOf(readAsJson));
    QueryTestParams.provisionQueries(conf);
    job.setNumReduceTasks(0);
    //PrintStreamOutputFormat.stream(conf, Stream.OUT);
    Configuration cfg = job.getConfiguration();
    HdpBootstrap.addProperties(cfg, TestSettings.TESTING_PROPS, false);
    return cfg;
}
93. AbstractMRNewApiSaveTest#testParentChild()
Project: elasticsearch-hadoop
File: AbstractMRNewApiSaveTest.java
@Test
public void testParentChild() throws Exception {
    // in ES 2.x, the parent/child relationship needs to be created fresh
    // hence why we reindex everything again
    String childIndex = indexPrefix + "child";
    String parentIndex = indexPrefix + "mr_parent";
    //String mapping = "{ \"" + parentIndex + "\" : {}, \"" + childIndex + "\" : { \"_parent\" : { \"type\" : \"" + parentIndex + "\" }}}";
    //RestUtils.putMapping(indexPrefix + "mroldapi/child", StringUtils.toUTF(mapping));
    RestUtils.putMapping(indexPrefix + "mrnewapi/child", "org/elasticsearch/hadoop/integration/mr-child.json");
    RestUtils.putMapping(indexPrefix + "mrnewapi/parent", StringUtils.toUTF("{\"parent\":{}}"));
    Configuration conf = createConf();
    conf.set(ConfigurationOptions.ES_RESOURCE, "mrnewapi/mr-parent");
    runJob(conf);
    conf = createConf();
    conf.set(ConfigurationOptions.ES_RESOURCE, "mrnewapi/child");
    conf.set(ConfigurationOptions.ES_INDEX_AUTO_CREATE, "no");
    conf.set(ConfigurationOptions.ES_MAPPING_PARENT, "number");
    runJob(conf);
}
94. TestAllowFormat#testFormatShouldBeIgnoredForNonFileBasedDirs()
Project: hadoop-20
File: TestAllowFormat.java
/** * Test to ensure that format is called for non-file journals. */ @Test public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception { Configuration conf = new Configuration(); File nameDir = new File(hdfsDir, "name"); if (nameDir.exists()) { FileUtil.fullyDelete(nameDir); } conf.setBoolean("dfs.namenode.support.allowformat", true); conf.set("dfs.name.edits.journal-plugin" + ".dummy", DummyJournalManager.class.getName()); conf.set("dfs.name.edits.dir", "dummy://test"); conf.set("dfs.name.dir", nameDir.getPath()); NameNode.format(conf, false, true); assertTrue(DummyJournalManager.formatCalled); assertTrue(DummyJournalManager.shouldPromptCalled); }
95. TestCrcCorruption#testCrcCorruption()
Project: hadoop-20
File: TestCrcCorruption.java
public void testCrcCorruption() throws Exception {
    //
    // default parameters
    //
    System.out.println("TestCrcCorruption with default parameters");
    Configuration conf1 = new Configuration();
    conf1.setInt("dfs.blockreport.intervalMsec", 3 * 1000);
    DFSTestUtil util1 = new DFSTestUtil("TestCrcCorruption", 40, 3, 8 * 1024);
    thistest(conf1, util1);
    //
    // specific parameters
    //
    System.out.println("TestCrcCorruption with specific parameters");
    Configuration conf2 = new Configuration();
    conf2.setInt("io.bytes.per.checksum", 17);
    conf2.setInt("dfs.block.size", 34);
    DFSTestUtil util2 = new DFSTestUtil("TestCrcCorruption", 40, 3, 400);
    thistest(conf2, util2);
}
96. TestNameEditsConfigs#testNameDirPolicyFailure()
Project: hadoop-20
File: TestNameEditsConfigs.java
// Test dfs.name.dir.policy configuration failure cases
private void testNameDirPolicyFailure(int policy, boolean useUri) throws IOException {
    Configuration conf = null;
    MiniDFSCluster cluster = null;
    String prefix = useUri ? "file:" : "";
    File nameAndEdits = new File(base_dir, "name_and_edits");
    String policyStr = Integer.toString(policy);
    conf = new Configuration();
    conf.set("dfs.name.dir.policy", policyStr);
    conf.set("dfs.name.dir", prefix + nameAndEdits.getPath());
    conf.set("dfs.name.edits.dir", prefix + nameAndEdits.getPath());
    try {
        cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true, null, null, null, null);
        fail("The startup should fail");
    } catch (IOException e) {
        // expect to fail
        System.out.println("cluster start failed due to name/edits dir " + "violating policy " + policyStr);
    } finally {
        cluster = null;
    }
}
97. TestAvatarStorageSetup#testSameSharedEditsLocation()
Project: hadoop-20
File: TestAvatarStorageSetup.java
@Test
public void testSameSharedEditsLocation() throws Exception {
    Configuration conf = new Configuration();
    URI img0 = new URI("qjm://localhost:1234;localhost:1235;localhost:1236/test-id/zero/");
    URI img1 = new URI("qjm://localhost:1234;localhost:1235;localhost:1236/test-id/one/");
    URI edit0 = new URI("qjm://localhost:1234;localhost:1235;localhost:1236/test-id/");
    URI edit1 = new URI("qjm://localhost:1234;localhost:1235;localhost:1236/test-id/");
    conf.set("dfs.name.dir.shared0", img0.toString());
    conf.set("dfs.name.dir.shared1", img1.toString());
    conf.set("dfs.name.edits.dir.shared0", edit0.toString());
    conf.set("dfs.name.edits.dir.shared1", edit1.toString());
    // local locations for image and edits
    Collection<URI> namedirs = NNStorageConfiguration.getNamespaceDirs(conf, null);
    Collection<URI> editsdir = NNStorageConfiguration.getNamespaceEditsDirs(conf, null);
    try {
        AvatarStorageSetup.validate(conf, namedirs, editsdir, img0, img1, edit0, edit1);
        fail("fail of same shared edits location");
    } catch (IOException ex) {
        assertTrue(ex.getMessage().contains("same edits location"));
    }
}
98. TestAvatarStorageSetup#testSameSharedImageLocation()
Project: hadoop-20
File: TestAvatarStorageSetup.java
@Test
public void testSameSharedImageLocation() throws Exception {
    Configuration conf = new Configuration();
    URI img0 = new URI("qjm://localhost:1234;localhost:1235;localhost:1236/test-id/");
    URI img1 = new URI("qjm://localhost:1234;localhost:1235;localhost:1236/test-id/");
    URI edit0 = new URI("qjm://localhost:1234;localhost:1235;localhost:1236/test-id/zero/");
    URI edit1 = new URI("qjm://localhost:1234;localhost:1235;localhost:1236/test-id/one/");
    conf.set("dfs.name.dir.shared0", img0.toString());
    conf.set("dfs.name.dir.shared1", img1.toString());
    conf.set("dfs.name.edits.dir.shared0", edit0.toString());
    conf.set("dfs.name.edits.dir.shared1", edit1.toString());
    // local locations for image and edits
    Collection<URI> namedirs = NNStorageConfiguration.getNamespaceDirs(conf, null);
    Collection<URI> editsdir = NNStorageConfiguration.getNamespaceEditsDirs(conf, null);
    try {
        AvatarStorageSetup.validate(conf, namedirs, editsdir, img0, img1, edit0, edit1);
        fail("fail of same shared image location");
    } catch (IOException ex) {
        assertTrue(ex.getMessage().contains("same image location"));
    }
}
99. TestMiniCoronaFederatedJT#testOneRemoteJT()
Project: hadoop-20
File: TestMiniCoronaFederatedJT.java
public void testOneRemoteJT() throws Exception { LOG.info("Starting testOneRemoteJT"); String[] racks = "/rack-1".split(","); String[] trackers = "tracker-1".split(","); corona = new MiniCoronaCluster.Builder().numTaskTrackers(1).racks(racks).hosts(trackers).build(); Configuration conf = corona.createJobConf(); conf.set("mapred.job.tracker", "corona"); conf.set("mapred.job.tracker.class", CoronaJobTracker.class.getName()); String locationsCsv = "tracker-1"; conf.set("test.locations", locationsCsv); conf.setBoolean("mapred.coronajobtracker.forceremote", true); Job job = new Job(conf); job.setMapperClass(TstJob.TestMapper.class); job.setInputFormatClass(TstJob.TestInputFormat.class); job.setOutputFormatClass(NullOutputFormat.class); job.setNumReduceTasks(0); job.getConfiguration().set("io.sort.record.pct", "0.50"); job.getConfiguration().set("io.sort.mb", "25"); boolean success = job.waitForCompletion(true); assertTrue("Job did not succeed", success); }
100. TestNNThroughputBenchmark#main()
Project: hadoop-20
File: TestNNThroughputBenchmark.java
/**
 * This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
 * @throws Exception
 */
public static void main(String[] arg) throws Exception {
    // make the configuration before benchmark
    Configuration conf = new Configuration();
    FileSystem.setDefaultUri(conf, "hdfs://0.0.0.0:" + 9000);
    conf.set("dfs.http.address", "0.0.0.0:0");
    Random rand = new Random();
    String dir = "/tmp/testNN" + rand.nextInt(Integer.MAX_VALUE);
    conf.set("dfs.name.dir", dir);
    conf.set("dfs.name.edits.dir", dir);
    conf.set("dfs.namenode.support.allowformat", "true");
    //conf.set("fs.default.name", "hdfs://0.0.0.0:9000");
    NameNode.format(conf);
    // create the first benchmark
    NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(arg));
}