Here are examples of the Java API org.apache.hadoop.conf.Configuration.setInt() taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
841 Examples
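For context before the examples: setInt(name, value) stores the integer as a string property on the Configuration, and getInt(name, defaultValue) parses it back, returning the default when the key is unset. Below is a minimal sketch of that round trip; the key names are made up for illustration only.

import org.apache.hadoop.conf.Configuration;

public class SetIntSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "example.retry.count" is a hypothetical key, used only for illustration
        conf.setInt("example.retry.count", 3);
        // setInt stores the value as a string, so get() returns "3"
        System.out.println(conf.get("example.retry.count"));
        // getInt parses the string back to an int, falling back to the
        // supplied default when the key is absent
        System.out.println(conf.getInt("example.retry.count", 1)); // prints 3
        System.out.println(conf.getInt("example.missing.key", 1)); // prints 1
    }
}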
Source: TestS3MultipartOutputCommitter.java
with Apache License 2.0
from rdblue
@Before
public void setupCommitter() throws Exception {
getConfiguration().set("s3.multipart.committer.num-threads", String.valueOf(numThreads));
getConfiguration().set(UPLOAD_UUID, UUID.randomUUID().toString());
this.job = new JobContextImpl(getConfiguration(), JOB_ID);
this.jobCommitter = new MockedS3Committer(S3_OUTPUT_PATH, job);
jobCommitter.setupJob(job);
this.uuid = job.getConfiguration().get(UPLOAD_UUID);
this.tac = new TaskAttemptContextImpl(new Configuration(job.getConfiguration()), AID);
// get the task's configuration copy so modifications take effect
this.conf = tac.getConfiguration();
conf.set("mapred.local.dir", "/tmp/local-0,/tmp/local-1");
conf.setInt(UPLOAD_SIZE, 100);
this.committer = new MockedS3Committer(S3_OUTPUT_PATH, tac);
}
Source: TestPrestoS3FileSystem.java
with Apache License 2.0
from openlookeng
@SuppressWarnings({ "OverlyStrongTypeCast", "ConstantConditions" })
@Test
public void testGetMetadataRetryCounter() {
int maxRetries = 2;
try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
MockAmazonS3 s3 = new MockAmazonS3();
s3.setGetObjectMetadataHttpCode(HTTP_INTERNAL_ERROR);
Configuration configuration = new Configuration();
configuration.set(S3_MAX_BACKOFF_TIME, "1ms");
configuration.set(S3_MAX_RETRY_TIME, "5s");
configuration.setInt(S3_MAX_CLIENT_RETRIES, maxRetries);
fs.initialize(new URI("s3n://test-bucket/"), configuration);
fs.setS3Client(s3);
fs.getS3ObjectMetadata(new Path("s3n://test-bucket/test"));
} catch (Throwable expected) {
assertInstanceOf(expected, AmazonS3Exception.class);
assertEquals(((AmazonS3Exception) expected).getStatusCode(), HTTP_INTERNAL_ERROR);
assertEquals(PrestoS3FileSystem.getFileSystemStats().getGetMetadataRetries().getTotalCount(), maxRetries);
}
}
Source: TestResourceManager.java
with Apache License 2.0
from NJUJYB
@Test(timeout = 30000)
public void testResourceManagerInitConfigValidation() throws Exception {
Configuration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, -1);
resourceManager = new ResourceManager();
try {
resourceManager.init(conf);
fail("Exception is expected because the global max attempts" + " is negative.");
} catch (YarnRuntimeException e) {
// Exception is expected.
if (!e.getMessage().startsWith("Invalid global max attempts configuration"))
throw e;
}
}
Source: TestDeletionService.java
with Apache License 2.0
from NJUJYB
@Test
public void testStopWithDelayedTasks() throws Exception {
DeletionService del = new DeletionService(Mockito.mock(ContainerExecutor.class));
Configuration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 60);
try {
del.init(conf);
del.start();
del.delete("dingo", new Path("/does/not/exist"));
} finally {
del.stop();
}
assertTrue(del.isTerminated());
}
Source: TestDeletionService.java
with Apache License 2.0
from NJUJYB
@Test
public void testNoDelete() throws Exception {
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List<Path> dirs = buildDirs(r, base, 20);
createDirs(new Path("."), dirs);
FakeDefaultContainerExecutor exec = new FakeDefaultContainerExecutor();
Configuration conf = new Configuration();
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, -1);
exec.setConf(conf);
DeletionService del = new DeletionService(exec);
try {
del.init(conf);
del.start();
for (Path p : dirs) {
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p, null);
}
int msecToWait = 20 * 1000;
for (Path p : dirs) {
while (msecToWait > 0 && lfs.util().exists(p)) {
Thread.sleep(100);
msecToWait -= 100;
}
assertTrue(lfs.util().exists(p));
}
} finally {
del.stop();
}
}
Source: TestSwiftFileSystemPartitionedUploads.java
with Apache License 2.0
from NJUJYB
@Override
protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
// set the partition size to 1 KB
conf.setInt(SwiftProtocolConstants.SWIFT_PARTITION_SIZE, PART_SIZE);
return conf;
}
Source: TestReadPastBuffer.java
with Apache License 2.0
from NJUJYB
/**
* Get a configuration with a small blocksize reported to callers
* @return a configuration for this test
*/
@Override
public Configuration getConf() {
Configuration conf = super.getConf();
/*
* set to 4KB
*/
conf.setInt(SwiftProtocolConstants.SWIFT_BLOCKSIZE, SWIFT_READ_BLOCKSIZE);
return conf;
}
Source: TestCompressionEmulationUtils.java
with Apache License 2.0
from NJUJYB
/**
* Runs a GridMix data-generation job.
*/
private static void runDataGenJob(Configuration conf, Path tempDir) throws IOException, ClassNotFoundException, InterruptedException {
JobClient client = new JobClient(conf);
// get the local job runner
conf.setInt(MRJobConfig.NUM_MAPS, 1);
Job job = new Job(conf);
CompressionEmulationUtil.configure(job);
job.setInputFormatClass(CustomInputFormat.class);
// set the output path
FileOutputFormat.setOutputPath(job, tempDir);
// submit and wait for completion
job.submit();
int ret = job.waitForCompletion(true) ? 0 : 1;
replacedertEquals("Job Failed", 0, ret);
}
Source: TestDynamicInputFormat.java
with Apache License 2.0
from NJUJYB
@Test
public void testGetSplitRatio() throws Exception {
Assert.assertEquals(1, DynamicInputFormat.getSplitRatio(1, 1000000000));
Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(11000000, 10));
Assert.assertEquals(4, DynamicInputFormat.getSplitRatio(30, 700));
Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(30, 200));
// Tests with negative value configuration
Configuration conf = new Configuration();
conf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE, -1);
conf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL, -1);
conf.setInt(DistCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK, -1);
conf.setInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO, -1);
Assert.assertEquals(1, DynamicInputFormat.getSplitRatio(1, 1000000000, conf));
Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(11000000, 10, conf));
Assert.assertEquals(4, DynamicInputFormat.getSplitRatio(30, 700, conf));
Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(30, 200, conf));
// Tests with valid configuration
conf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE, 100);
conf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL, 30);
conf.setInt(DistCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK, 10);
conf.setInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO, 53);
Assert.assertEquals(53, DynamicInputFormat.getSplitRatio(3, 200, conf));
}
Source: TestSplitters.java
with Apache License 2.0
from NJUJYB
@Before
public void setup() {
configuration = new Configuration();
configuration.setInt(MRJobConfig.NUM_MAPS, 2);
}
Source: TestFileInputFormat.java
with Apache License 2.0
from NJUJYB
@Test
public void testListStatusNestedNonRecursive() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
List<Path> expectedPaths = org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.configureTestNestedNonRecursive(conf, localFs);
JobConf jobConf = new JobConf(conf);
TextInputFormat fif = new TextInputFormat();
fif.configure(jobConf);
FileStatus[] statuses = fif.listStatus(jobConf);
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.verifyFileStatuses(expectedPaths, Lists.newArrayList(statuses), localFs);
}
Source: TestFileInputFormat.java
with Apache License 2.0
from NJUJYB
@Test
public void testListStatusSimple() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
List<Path> expectedPaths = org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.configureTestSimple(conf, localFs);
JobConf jobConf = new JobConf(conf);
TextInputFormat fif = new TextInputFormat();
fif.configure(jobConf);
FileStatus[] statuses = fif.listStatus(jobConf);
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.verifyFileStatuses(expectedPaths, Lists.newArrayList(statuses), localFs);
}
Source: TestFileInputFormat.java
with Apache License 2.0
from NJUJYB
@Test
public void testListStatusNestedRecursive() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
List<Path> expectedPaths = org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.configureTestNestedRecursive(conf, localFs);
JobConf jobConf = new JobConf(conf);
TextInputFormat fif = new TextInputFormat();
fif.configure(jobConf);
FileStatus[] statuses = fif.listStatus(jobConf);
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.verifyFileStatuses(expectedPaths, Lists.newArrayList(statuses), localFs);
}
Source: TestFileInputFormat.java
with Apache License 2.0
from NJUJYB
@Test
public void testListStatusErrorOnNonExistantDir() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.configureTestErrorOnNonExistantDir(conf, localFs);
JobConf jobConf = new JobConf(conf);
TextInputFormat fif = new TextInputFormat();
fif.configure(jobConf);
try {
fif.listStatus(jobConf);
Assert.fail("Expecting an IOException for a missing Input path");
} catch (IOException e) {
Path expectedExceptionPath = new Path(TEST_ROOT_DIR, "input2");
expectedExceptionPath = localFs.makeQualified(expectedExceptionPath);
Assert.assertTrue(e instanceof InvalidInputException);
Assert.assertEquals("Input path does not exist: " + expectedExceptionPath.toString(), e.getMessage());
}
}
Source: TestSetTimes.java
with Apache License 2.0
from NJUJYB
/**
* Test that when access time updates are not needed, the FSNamesystem
* write lock is not taken by getBlockLocations.
* Regression test for HDFS-3981.
*/
@Test(timeout = 60000)
public void testGetBlockLocationsOnlyUsesReadLock() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 100 * 1000);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(cluster.getNamesystem());
try {
// Create empty file in the FSN.
Path p = new Path("/empty-file");
DFSTestUtil.createFile(cluster.getFileSystem(), p, 0, (short) 1, 0L);
// getBlockLocations() should not need the write lock, since we just created
// the file (and thus its access time is already within the 100-second
// accesstime precision configured above).
MockitoUtil.doThrowWhenCallStackMatches(new AssertionError("Should not need write lock"), ".*getBlockLocations.*").when(spyLock).writeLock();
cluster.getFileSystem().getFileBlockLocations(p, 0, 100);
} finally {
cluster.shutdown();
}
}
Source: TestInjectionForSimulatedStorage.java
with Apache License 2.0
from NJUJYB
/* This test makes sure that the NameNode retries all the available blocks
 * for under-replicated blocks. It uses simulated storage and one
 * of its features to inject blocks.
 *
 * It creates a file with several blocks and a replication factor of 4.
 * The cluster is then shut down - the NN retains its state, but the DNs are
 * all simulated and hence lose their blocks.
 * The blocks are then injected into one of the DNs. The expected behaviour is
 * that the NN will arrange for the missing replicas to be copied from a valid source.
 */
@Test
public void testInjection() throws IOException {
MiniDFSCluster cluster = null;
String testFile = "/replication-test-file";
Path testPath = new Path(testFile);
byte[] buffer = new byte[1024];
for (int i = 0; i < buffer.length; i++) {
buffer[i] = '1';
}
try {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize);
SimulatedFSDataset.setFactory(conf);
// first time format
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
String bpid = cluster.getNamesystem().getBlockPoolId();
DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
writeFile(cluster.getFileSystem(), testPath, numDataNodes);
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
List<Map<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
cluster.shutdown();
cluster = null;
/* Start the MiniDFSCluster with more datanodes since once a writeBlock
* to a datanode node fails, same block can not be written to it
* immediately. In our case some replication attempts will fail.
*/
LOG.info("Restarting minicluster");
conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf);
conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes * 2).format(false).build();
cluster.waitActive();
Set<Block> uniqueBlocks = new HashSet<Block>();
for (Map<DatanodeStorage, BlockListAsLongs> map : blocksList) {
for (BlockListAsLongs blockList : map.values()) {
for (Block b : blockList) {
uniqueBlocks.add(new Block(b));
}
}
}
// Insert all the blocks in the first data node
LOG.info("Inserting " + uniqueBlocks.size() + " blocks");
cluster.injectBlocks(0, uniqueBlocks, null);
dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Source: TestFileStatus.java
with Apache License 2.0
from NJUJYB
@BeforeClass
public static void testSetUp() throws Exception {
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
cluster = new MiniDFSCluster.Builder(conf).build();
fs = cluster.getFileSystem();
fc = FileContext.getFileContext(cluster.getURI(0), conf);
hftpfs = cluster.getHftpFileSystem(0);
dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
file1 = new Path("filestatus.dat");
writeFile(fs, file1, 1, fileSize, blockSize);
}
Source: TestFileAppend4.java
with Apache License 2.0
from NJUJYB
@Before
public void setUp() throws Exception {
this.conf = new Configuration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
// lower heartbeat interval for fast recognition of DN death
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
// handle under-replicated blocks quickly (for replication asserts)
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
// handle failures in the DFSClient pipeline quickly
// (for cluster.shutdown(); fs.close() idiom)
conf.setInt("ipc.client.connect.max.retries", 1);
}
Source: TestDataTransferKeepalive.java
with Apache License 2.0
from NJUJYB
@Before
public void setup() throws Exception {
conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, KEEPALIVE_TIMEOUT);
conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 0);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
dn = cluster.getDataNodes().get(0);
}
Source: TestCrcCorruption.java
with Apache License 2.0
from NJUJYB
@Test
public void testCrcCorruption() throws Exception {
//
// default parameters
//
System.out.println("TestCrcCorruption with default parameters");
Configuration conf1 = new HdfsConfiguration();
conf1.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
DFSTestUtil util1 = new DFSTestUtil.Builder().setName("TestCrcCorruption").setNumFiles(40).build();
thistest(conf1, util1);
//
// specific parameters
//
System.out.println("TestCrcCorruption with specific parameters");
Configuration conf2 = new HdfsConfiguration();
conf2.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 17);
conf2.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 34);
DFSTestUtil util2 = new DFSTestUtil.Builder().setName("TestCrcCorruption").setNumFiles(40).setMaxSize(400).build();
thistest(conf2, util2);
}
Source: TestBlockReaderFactory.java
with Apache License 2.0
from NJUJYB
/**
* Test that a client which supports short-circuit reads using
* shared memory can fall back to not using shared memory when
* the server doesn't support it.
*/
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration clientConf = createShortCircuitConf("testShortCircuitReadFromServerWithoutShm", sockDir);
Configuration serverConf = new Configuration(clientConf);
serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
DFSInputStream.tcpReadsDisabledForTesting = true;
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
clientConf.set(DFS_CLIENT_CONTEXT, "testShortCircuitReadFromServerWithoutShm_clientContext");
final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
final String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 4000;
final int SEED = 0xFADEC;
DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents, expected));
final ShortCircuitCache cache = fs.dfs.getClientContext().getShortCircuitCache();
final DatanodeInfo datanode = new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info) throws IOException {
Assert.assertEquals(1, info.size());
PerDatanodeVisitorInfo vinfo = info.get(datanode);
Assert.assertTrue(vinfo.disabled);
Assert.assertEquals(0, vinfo.full.size());
Assert.assertEquals(0, vinfo.notFull.size());
}
});
cluster.shutdown();
}
Source: TestBlockReaderFactory.java
with Apache License 2.0
from NJUJYB
/**
* Test that a client which does not support short-circuit reads using
* shared memory can talk with a server which supports it.
*/
@Test
public void testShortCircuitReadFromClientWithoutShm() throws Exception {
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration clientConf = createShortCircuitConf("testShortCircuitReadWithoutShm", sockDir);
Configuration serverConf = new Configuration(clientConf);
DFSInputStream.tcpReadsDisabledForTesting = true;
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
clientConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
clientConf.set(DFS_CLIENT_CONTEXT, "testShortCircuitReadFromClientWithoutShm_clientContext");
final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
final String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 4000;
final int SEED = 0xFADEC;
DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents, expected));
final ShortCircuitCache cache = fs.dfs.getClientContext().getShortCircuitCache();
Assert.assertEquals(null, cache.getDfsClientShmManager());
cluster.shutdown();
}
Source: TestFSDirectory.java
with Apache License 2.0
from NJUJYB
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file5, 1024, REPLICATION, seed);
hdfs.mkdirs(sub2);
}
Source: TestXAttrsWithHA.java
with Apache License 2.0
from NJUJYB
@Before
public void setupCluster() throws Exception {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
HAUtil.setAllowStandbyReads(conf, true);
cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).waitSafeMode(false).build();
cluster.waitActive();
nn0 = cluster.getNameNode(0);
nn1 = cluster.getNameNode(1);
fs = HATestUtil.configureFailoverFs(cluster, conf);
cluster.transitionToActive(0);
}
Source: TestInitializeSharedEdits.java
with Apache License 2.0
from NJUJYB
@Before
public void setupCluster() throws IOException {
conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
HAUtil.setAllowStandbyReads(conf, true);
MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build();
cluster.waitActive();
shutdownClusterAndRemoveSharedEditsDir();
}
Source: TestDFSUpgradeWithHA.java
with Apache License 2.0
from NJUJYB
@Before
public void createConfiguration() {
conf = new HdfsConfiguration();
// Turn off persistent IPC, so that the DFSClient can survive NN restart
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
}
Source: TestBootstrapStandbyWithQJM.java
with Apache License 2.0
from NJUJYB
@Before
public void setup() throws Exception {
Configuration conf = new Configuration();
// Turn off IPC client caching, so that the suite can handle
// the restart of the daemons between test cases.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
MiniQJMHACluster miniQjmHaCluster = new MiniQJMHACluster.Builder(conf).build();
cluster = miniQjmHaCluster.getDfsCluster();
jCluster = miniQjmHaCluster.getJournalCluster();
// make nn0 active
cluster.transitionToActive(0);
// do something to generate in-progress edit log data
DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
dfs.mkdirs(new Path("/test2"));
dfs.close();
}
Source: TestDataNodeVolumeFailureToleration.java
with Apache License 2.0
from NJUJYB
/**
* Restart the datanodes with a new volume tolerated value.
* @param volTolerated number of dfs data dir failures to tolerate
* @param manageDfsDirs whether the mini cluster should manage data dirs
* @throws IOException
*/
private void restartDatanodes(int volTolerated, boolean manageDfsDirs) throws IOException {
// Make sure no datanode is running
cluster.shutdownDataNodes();
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, volTolerated);
cluster.startDataNodes(conf, 1, manageDfsDirs, null, null);
cluster.waitActive();
}
Source: TestDataNodeVolumeFailureToleration.java
with Apache License 2.0
from NJUJYB
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, i.e. that the DN shuts itself down when the number of volume
 * failures it experiences exceeds the tolerated amount.
 */
@Test
public void testConfigureMinValidVolumes() throws Exception {
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
// Bring up two additional datanodes that need both of their volumes
// functioning in order to stay up.
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager().getDatanodeManager();
long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
// Fail a volume on the 2nd DN
File dn2Vol1 = new File(dataDir, "data" + (2 * 1 + 1));
replacedertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
// Should only get two replicas (the first DN and the 3rd)
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short) 3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short) 2);
// Check that this single failure caused a DN to die.
DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 0, origCapacity - (1 * dnCapacity), WAIT_FOR_HEARTBEATS);
// If we restore the volume we should still only be able to get
// two replicas since the DN is still considered dead.
replacedertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
Path file2 = new Path("/test2");
DFSTestUtil.createFile(fs, file2, 1024, (short) 3, 1L);
DFSTestUtil.waitReplication(fs, file2, (short) 2);
}
Source: TestDataNodeRollingUpgrade.java
with Apache License 2.0
from NJUJYB
private void startCluster() throws IOException {
conf = new HdfsConfiguration();
conf.setInt("dfs.blocksize", 1024 * 1024);
cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
cluster.waitActive();
fs = cluster.getFileSystem();
nn = cluster.getNameNode(0);
assertNotNull(nn);
dn0 = cluster.getDataNodes().get(0);
assertNotNull(dn0);
blockPoolId = cluster.getNameNode(0).getNamesystem().getBlockPoolId();
}
Source: TestIPCServerResponder.java
with Apache License 2.0
from NJUJYB
public void testResponseBuffer() throws IOException, InterruptedException {
Server.INITIAL_RESP_BUF_SIZE = 1;
conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY, 1);
testServerResponder(1, true, 1, 1, 5);
// reset configuration
conf = new Configuration();
}
Source: TestTFileSeqFileComparison.java
with Apache License 2.0
from NJUJYB
@Override
public void setUp() throws IOException {
if (options == null) {
options = new MyOptions(new String[0]);
}
conf = new Configuration();
conf.setInt("tfile.fs.input.buffer.size", options.fsInputBufferSize);
conf.setInt("tfile.fs.output.buffer.size", options.fsOutputBufferSize);
Path path = new Path(options.rootDir);
fs = path.getFileSystem(conf);
formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
setUpDictionary();
}
Source: TestTFileSeek.java
with Apache License 2.0
from NJUJYB
@Override
public void setUp() throws IOException {
if (options == null) {
options = new MyOptions(new String[0]);
}
conf = new Configuration();
conf.setInt("tfile.fs.input.buffer.size", options.fsInputBufferSize);
conf.setInt("tfile.fs.output.buffer.size", options.fsOutputBufferSize);
path = new Path(new Path(options.rootDir), options.file);
fs = path.getFileSystem(conf);
timer = new NanoTimer(false);
rng = new Random(options.seed);
keyLenGen = new RandomDistribution.Zipf(new Random(rng.nextLong()), options.minKeyLen, options.maxKeyLen, 1.2);
DiscreteRNG valLenGen = new RandomDistribution.Flat(new Random(rng.nextLong()), options.minValLength, options.maxValLength);
DiscreteRNG wordLenGen = new RandomDistribution.Flat(new Random(rng.nextLong()), options.minWordLen, options.maxWordLen);
kvGen = new KVGenerator(rng, true, keyLenGen, valLenGen, wordLenGen, options.dictSize);
}
Source: ApplicationMasterLauncher.java
with Apache License 2.0
from naver
@Override
protected void serviceInit(Configuration conf) throws Exception {
int threadCount = conf.getInt(YarnConfiguration.RM_AMLAUNCHER_THREAD_COUNT, YarnConfiguration.DEFAULT_RM_AMLAUNCHER_THREAD_COUNT);
ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("ApplicationMasterLauncher #%d").build();
launcherPool = new ThreadPoolExecutor(threadCount, threadCount, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
launcherPool.setThreadFactory(tf);
Configuration newConf = new YarnConfiguration(conf);
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, conf.getInt(YarnConfiguration.RM_NODEMANAGER_CONNECT_RETIRES, YarnConfiguration.DEFAULT_RM_NODEMANAGER_CONNECT_RETIRES));
setConfig(newConf);
super.serviceInit(newConf);
}
Source: TestBlockReaderFactory.java
with Apache License 2.0
from naver
/**
* Test that a client which supports short-circuit reads using
* shared memory can fall back to not using shared memory when
* the server doesn't support it.
*/
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration clientConf = createShortCircuitConf("testShortCircuitReadFromServerWithoutShm", sockDir);
Configuration serverConf = new Configuration(clientConf);
serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
DFSInputStream.tcpReadsDisabledForTesting = true;
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
clientConf.set(DFS_CLIENT_CONTEXT, "testShortCircuitReadFromServerWithoutShm_clientContext");
final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
final String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 4000;
final int SEED = 0xFADEC;
DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents, expected));
final ShortCircuitCache cache = fs.dfs.getClientContext().getShortCircuitCache();
final DatanodeInfo datanode = new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info) throws IOException {
Assert.assertEquals(1, info.size());
PerDatanodeVisitorInfo vinfo = info.get(datanode);
Assert.assertTrue(vinfo.disabled);
Assert.assertEquals(0, vinfo.full.size());
Assert.assertEquals(0, vinfo.notFull.size());
}
});
cluster.shutdown();
sockDir.close();
}
Source: TestBlockReaderFactory.java
with Apache License 2.0
from naver
/**
* Test that a client which does not support short-circuit reads using
* shared memory can talk with a server which supports it.
*/
@Test
public void testShortCircuitReadFromClientWithoutShm() throws Exception {
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration clientConf = createShortCircuitConf("testShortCircuitReadWithoutShm", sockDir);
Configuration serverConf = new Configuration(clientConf);
DFSInputStream.tcpReadsDisabledForTesting = true;
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
clientConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
clientConf.set(DFS_CLIENT_CONTEXT, "testShortCircuitReadFromClientWithoutShm_clientContext");
final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
final String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 4000;
final int SEED = 0xFADEC;
DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents, expected));
final ShortCircuitCache cache = fs.dfs.getClientContext().getShortCircuitCache();
Assert.assertEquals(null, cache.getDfsClientShmManager());
cluster.shutdown();
sockDir.close();
}
Source: TestRollingWindowManager.java
with Apache License 2.0
from naver
@Before
public void init() {
conf = new Configuration();
conf.setInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY, BUCKET_CNT);
conf.setInt(DFSConfigKeys.NNTOP_NUM_USERS_KEY, N_TOP_USERS);
manager = new RollingWindowManager(conf, WINDOW_LEN_MS);
users = new String[2 * N_TOP_USERS];
for (int i = 0; i < users.length; i++) {
users[i] = "user" + i;
}
}
Source: TestDataNodeVolumeFailureToleration.java
with Apache License 2.0
from naver
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, i.e. that the DN shuts itself down when the number of volume
 * failures it experiences exceeds the tolerated amount.
 */
@Test
public void testConfigureMinValidVolumes() throws Exception {
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
// Bring up two additional datanodes that need both of their volumes
// functioning in order to stay up.
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager().getDatanodeManager();
long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
// Fail a volume on the 2nd DN
File dn2Vol1 = new File(dataDir, "data" + (2 * 1 + 1));
DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
// Should only get two replicas (the first DN and the 3rd)
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short) 3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short) 2);
// Check that this single failure caused a DN to die.
DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 0, origCapacity - (1 * dnCapacity), WAIT_FOR_HEARTBEATS);
// If we restore the volume we should still only be able to get
// two replicas since the DN is still considered dead.
DataNodeTestUtils.restoreDataDirFromFailure(dn2Vol1);
Path file2 = new Path("/test2");
DFSTestUtil.createFile(fs, file2, 1024, (short) 3, 1L);
DFSTestUtil.waitReplication(fs, file2, (short) 2);
}
Source: TestFsVolumeList.java
with Apache License 2.0
from naver
@Before
public void setUp() {
dataset = mock(FsDatasetImpl.class);
baseDir = new FileSystemTestHelper().getTestRootDir();
Configuration blockScannerConf = new Configuration();
blockScannerConf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
blockScanner = new BlockScanner(null, blockScannerConf);
}
Source: AccumuloRecordWriter.java
with Apache License 2.0
from NationalSecurityAgency
public static void setLogLevel(Configuration conf, Level level) {
ArgumentChecker.notNull(level);
conf.setInt(LOGLEVEL, level.toInt());
}
Source: AccumuloRecordWriter.java
with Apache License 2.0
from NationalSecurityAgency
public static void setMaxWriteThreads(Configuration conf, int numberOfThreads) {
conf.setInt(NUM_WRITE_THREADS, numberOfThreads);
}
Source: AccumuloRecordWriter.java
with Apache License 2.0
from NationalSecurityAgency
public static void setMaxLatency(Configuration conf, int numberOfMilliseconds) {
conf.setInt(MAX_LATENCY, numberOfMilliseconds);
}
Source: ShardIdPartitionerTest.java
with Apache License 2.0
from NationalSecurityAgency
@Before
public void setUp() {
conf = new Configuration();
conf.setInt(ShardIdFactory.NUM_SHARDS, 31);
partitioner = new ShardIdPartitioner();
partitioner.setConf(conf);
}
Source: BalancedShardPartitionerTest.java
with Apache License 2.0
from NationalSecurityAgency
@BeforeClreplaced
public static void defineShardLocationsFile() throws IOException {
conf = new Configuration();
conf.setInt(ShardIdFactory.NUM_SHARDS, SHARDS_PER_DAY);
}
Source: TablePartitionerOffsetsTest.java
with Apache License 2.0
from NationalSecurityAgency
@Before
public void before() {
conf = new Configuration();
conf.setInt("splits.num.reduce", NUM_REDUCERS);
}
Source: QueryInputFormat.java
with Apache License 2.0
from Merck
public static void addQuery(Configuration conf, String name, String query, int repeatCount) {
Collection<String> qNames = conf.getStringCollection(QUERIES);
qNames.add(name);
conf.set(PREFIX + name + QUERY_SUFFIX, query);
conf.setInt(PREFIX + name + REPEAT_SUFFIX, repeatCount);
conf.setStrings(QUERIES, qNames.toArray(new String[qNames.size()]));
}
Source: TestApplicationMasterLauncher.java
with Apache License 2.0
from lsds
@Test
public void testRetriesOnFailures() throws Exception {
final ContainerManagementProtocol mockProxy = mock(ContainerManagementProtocol.class);
final StartContainersResponse mockResponse = mock(StartContainersResponse.class);
when(mockProxy.startContainers(any(StartContainersRequest.class))).thenThrow(new NMNotYetReadyException("foo")).thenReturn(mockResponse);
Configuration conf = new Configuration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
conf.setInt(YarnConfiguration.CLIENT_NM_CONNECT_RETRY_INTERVAL_MS, 1);
final DrainDispatcher dispatcher = new DrainDispatcher();
MockRM rm = new MockRMWithCustomAMLauncher(conf, null) {
@Override
protected ApplicationMasterLauncher createAMLauncher() {
return new ApplicationMasterLauncher(getRMContext()) {
@Override
protected Runnable createRunnableLauncher(RMAppAttempt application, AMLauncherEventType event) {
return new AMLauncher(context, application, event, getConfig()) {
@Override
protected YarnRPC getYarnRPC() {
YarnRPC mockRpc = mock(YarnRPC.class);
when(mockRpc.getProxy(any(Class.class), any(InetSocketAddress.class), any(Configuration.class))).thenReturn(mockProxy);
return mockRpc;
}
};
}
};
}
@Override
protected Dispatcher createDispatcher() {
return dispatcher;
}
};
rm.start();
MockNM nm1 = rm.registerNode("127.0.0.1:1234", 5120);
RMApp app = rm.submitApp(2000);
final ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
// kick the scheduling
nm1.nodeHeartbeat(true);
dispatcher.await();
rm.waitForState(appAttemptId, RMAppAttemptState.LAUNCHED, 500);
}
Source: TestLineRecordReader.java
with Apache License 2.0
from lsds
@Test
public void testUncompressedInputContainingCRLF() throws Exception {
Configuration conf = new Configuration();
String inputData = "a\r\nb\rc\nd\r\n";
Path inputFile = createInputFile(conf, inputData);
for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for (int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
}
Source: TestLineRecordReader.java
with Apache License 2.0
from lsds
@Test
public void testUncompressedInput() throws Exception {
Configuration conf = new Configuration();
// single char delimiter, best case
String inputData = "abc+def+ghi+jkl+mno+pqr+stu+vw +xyz";
Path inputFile = createInputFile(conf, inputData);
conf.set("textinputformat.record.delimiter", "+");
for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for (int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
// multi char delimiter, best case
inputData = "abc|+|def|+|ghi|+|jkl|+|mno|+|pqr|+|stu|+|vw |+|xyz";
inputFile = createInputFile(conf, inputData);
conf.set("textinputformat.record.delimiter", "|+|");
for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for (int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
// single char delimiter with empty records
inputData = "abc+def++ghi+jkl++mno+pqr++stu+vw ++xyz";
inputFile = createInputFile(conf, inputData);
conf.set("textinputformat.record.delimiter", "+");
for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for (int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
// multi char delimiter with empty records
inputData = "abc|+||+|defghi|+|jkl|+||+|mno|+|pqr|+||+|stu|+|vw |+||+|xyz";
inputFile = createInputFile(conf, inputData);
conf.set("textinputformat.record.delimiter", "|+|");
for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for (int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
// multi char delimiter with starting part of the delimiter in the data
inputData = "abc+def+-ghi+jkl+-mno+pqr+-stu+vw +-xyz";
inputFile = createInputFile(conf, inputData);
conf.set("textinputformat.record.delimiter", "+-");
for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for (int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
// multi char delimiter with newline as start of the delimiter
inputData = "abc\n+def\n+ghi\n+jkl\n+mno";
inputFile = createInputFile(conf, inputData);
conf.set("textinputformat.record.delimiter", "\n+");
for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for (int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
// multi char delimiter with newline in delimiter and in data
inputData = "abc\ndef+\nghi+\njkl\nmno";
inputFile = createInputFile(conf, inputData);
conf.set("textinputformat.record.delimiter", "+\n");
for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for (int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
}
Source: TestLineRecordReader.java
with Apache License 2.0
from lsds
@Test
public void testUncompressedInputWithLargeSplitSize() throws Exception {
Configuration conf = new Configuration();
// single char delimiter
String inputData = "abcde +fghij+ klmno+pqrst+uvwxyz";
Path inputFile = createInputFile(conf, inputData);
conf.set("textinputformat.record.delimiter", "+");
// split size over max value of integer
long longSplitSize = (long) Integer.MAX_VALUE + 1;
for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testLargeSplitRecordForFile(conf, longSplitSize, inputData.length(), inputFile);
}
}