org.apache.hadoop.fs.Path

Here are examples of the Java API class org.apache.hadoop.fs.Path, taken from open source projects.
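
Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the Path operations that recur throughout them: building a child path from a parent, walking back up with getParent(), and reading the last component with getName(). The directory names are made up.

import org.apache.hadoop.fs.Path;

public class PathBasics {
    public static void main(String[] args) {
        // Compose paths from a parent and a child component.
        Path base = new Path("/data/warehouse");
        Path table = new Path(base, "events");
        Path file = new Path(table, "part-00000");
        // Walk back up the hierarchy and inspect components.
        System.out.println(file);                     // /data/warehouse/events/part-00000
        System.out.println(file.getName());           // part-00000
        System.out.println(file.getParent());         // /data/warehouse/events
        System.out.println(file.toUri().getPath());   // /data/warehouse/events/part-00000
    }
}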

1. HFileCorruptionChecker#createQuarantinePath()

Project: hindex
Source File: HFileCorruptionChecker.java
View license
/**
   * Given a path, generates a new path to where we move a corrupted hfile (bad
   * trailer, no trailer).
   *
   * @param hFile
   *          Path to a corrupt hfile (assumes that it is HBASE_DIR/ table
   *          /region/cf/file)
   * @return path to where corrupted files are stored. This should be
   *         HBASE_DIR/.corrupt/table/region/cf/file.
   */
Path createQuarantinePath(Path hFile) {
    // extract the normal dirs structure
    Path cfDir = hFile.getParent();
    Path regionDir = cfDir.getParent();
    Path tableDir = regionDir.getParent();
    // build up the corrupted dirs structure
    Path corruptBaseDir = new Path(conf.get(HConstants.HBASE_DIR), conf.get("hbase.hfile.quarantine.dir", HConstants.CORRUPT_DIR_NAME));
    Path corruptTableDir = new Path(corruptBaseDir, tableDir.getName());
    Path corruptRegionDir = new Path(corruptTableDir, regionDir.getName());
    Path corruptFamilyDir = new Path(corruptRegionDir, cfDir.getName());
    Path corruptHfile = new Path(corruptFamilyDir, hFile.getName());
    return corruptHfile;
}
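
As a rough illustration of the parent-walking above (not part of the project; the real base directory and quarantine directory name come from the configuration), the following standalone sketch shows how a hypothetical corrupt hfile path is remapped under a .corrupt base directory:

import org.apache.hadoop.fs.Path;

public class QuarantinePathSketch {
    public static void main(String[] args) {
        // Hypothetical corrupt hfile laid out as HBASE_DIR/table/region/cf/file.
        Path hFile = new Path("/hbase/mytable/region1/cf/hfile1");
        Path cfDir = hFile.getParent();
        Path regionDir = cfDir.getParent();
        Path tableDir = regionDir.getParent();
        // Rebuild the same table/region/cf/file structure under HBASE_DIR/.corrupt.
        Path corruptBaseDir = new Path("/hbase", ".corrupt");
        Path quarantined = new Path(new Path(new Path(new Path(corruptBaseDir,
                tableDir.getName()), regionDir.getName()), cfDir.getName()), hFile.getName());
        System.out.println(quarantined);  // /hbase/.corrupt/mytable/region1/cf/hfile1
    }
}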

2. TestHarFileSystem#setUp()

Project: hadoop-20
Source File: TestHarFileSystem.java
View license
@BeforeClass
public static void setUp() throws Exception {
    dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null);
    fs = dfscluster.getFileSystem();
    mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
    inputPath = new Path(fs.getHomeDirectory(), "test");
    inputrelPath = new Path(fs.getHomeDirectory().toUri().getPath().substring(1), "test");
    filea = new Path(inputPath, "a");
    fileb = new Path(inputPath, "b");
    filec = new Path(inputPath, "c c");
    filed = new Path(inputPath, "d%d");
    // check for a har containing escape-worthy
    // characters in their names
    archivePath = new Path(fs.getHomeDirectory(), "tmp");
    fs.mkdirs(inputPath);
    CopyFilesBase.createFileWithContent(fs, filea, "a".getBytes());
    CopyFilesBase.createFileWithContent(fs, fileb, "b".getBytes());
    CopyFilesBase.createFileWithContent(fs, filec, "c".getBytes());
    CopyFilesBase.createFileWithContent(fs, filed, "d".getBytes());
}

3. TestHarFileSystem#setUp()

View license
protected void setUp() throws Exception {
    super.setUp();
    dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null);
    fs = dfscluster.getFileSystem();
    mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
    inputPath = new Path(fs.getHomeDirectory(), "test");
    inputrelPath = new Path(fs.getHomeDirectory().toUri().getPath().substring(1), "test");
    filea = new Path(inputPath, "a");
    fileb = new Path(inputPath, "b");
    filec = new Path(inputPath, "c");
    archivePath = new Path(fs.getHomeDirectory(), "tmp");
    fs.mkdirs(inputPath);
    FSDataOutputStream out = fs.create(filea);
    out.write("a".getBytes());
    out.close();
    out = fs.create(fileb);
    out.write("b".getBytes());
    out.close();
    out = fs.create(filec);
    out.write("c".getBytes());
    out.close();
}

4. HFileLink#getHFileFromBackReference()

Project: hindex
Source File: HFileLink.java
View license
/**
   * Get the full path of the HFile referenced by the back reference
   *
   * @param rootDir root hbase directory
   * @param linkRefPath Link Back Reference path
   * @return full path of the referenced hfile
   * @throws IOException on unexpected error.
   */
public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) {
    int separatorIndex = linkRefPath.getName().indexOf('.');
    String linkRegionName = linkRefPath.getName().substring(0, separatorIndex);
    String linkTableName = linkRefPath.getName().substring(separatorIndex + 1);
    String hfileName = getBackReferenceFileName(linkRefPath.getParent());
    Path familyPath = linkRefPath.getParent().getParent();
    Path regionPath = familyPath.getParent();
    Path tablePath = regionPath.getParent();
    String linkName = createHFileLinkName(tablePath.getName(), regionPath.getName(), hfileName);
    Path linkTableDir = FSUtils.getTablePath(rootDir, linkTableName);
    Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName);
    return new Path(new Path(regionDir, familyPath.getName()), linkName);
}
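
The back reference's file name encodes the linked region and table, split at the first '.'; the sketch below (a made-up path, with the FSUtils/HRegion resolution from the method above omitted) shows just that parsing step:

import org.apache.hadoop.fs.Path;

public class BackReferenceNameSketch {
    public static void main(String[] args) {
        // Hypothetical back-reference path; only the final name component is parsed here.
        Path linkRefPath = new Path("/hbase/.archive/t1/r1/cf/refs/region-abc.othertable");
        String name = linkRefPath.getName();
        int separatorIndex = name.indexOf('.');
        String linkRegionName = name.substring(0, separatorIndex);  // region-abc
        String linkTableName = name.substring(separatorIndex + 1);  // othertable
        System.out.println(linkRegionName + " / " + linkTableName);
    }
}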

5. TestHdfsSpout#testSimpleText_noACK()

View license
@Test
public void testSimpleText_noACK() throws IOException {
    Path file1 = new Path(source.toString() + "/file1.txt");
    createTextFile(file1, 5);
    Path file2 = new Path(source.toString() + "/file2.txt");
    createTextFile(file2, 5);
    Map conf = getDefaultConfig();
    conf.put(Configs.COMMIT_FREQ_COUNT, "1");
    conf.put(Configs.COMMIT_FREQ_SEC, "1");
    HdfsSpout spout = makeSpout(0, conf, Configs.TEXT, TextFileReader.defaultFields);
    runSpout(spout, "r11");
    Path arc1 = new Path(archive.toString() + "/file1.txt");
    Path arc2 = new Path(archive.toString() + "/file2.txt");
    checkCollectorOutput_txt((MockCollector) spout.getCollector(), arc1, arc2);
}

6. TestHdfsSpout#testSimpleText_ACK()

View license
@Test
public void testSimpleText_ACK() throws IOException {
    Path file1 = new Path(source.toString() + "/file1.txt");
    createTextFile(file1, 5);
    Path file2 = new Path(source.toString() + "/file2.txt");
    createTextFile(file2, 5);
    Map conf = getDefaultConfig();
    conf.put(Configs.COMMIT_FREQ_COUNT, "1");
    conf.put(Configs.COMMIT_FREQ_SEC, "1");
    // enable acking
    conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, "1");
    HdfsSpout spout = makeSpout(0, conf, Configs.TEXT, TextFileReader.defaultFields);
    // consume file 1
    runSpout(spout, "r6", "a0", "a1", "a2", "a3", "a4");
    Path arc1 = new Path(archive.toString() + "/file1.txt");
    checkCollectorOutput_txt((MockCollector) spout.getCollector(), arc1);
    // consume file 2
    runSpout(spout, "r6", "a5", "a6", "a7", "a8", "a9");
    Path arc2 = new Path(archive.toString() + "/file2.txt");
    checkCollectorOutput_txt((MockCollector) spout.getCollector(), arc1, arc2);
}

7. TestFileSystemMetadataProvider#testCreateMetadataFiles()

View license
@Test
public void testCreateMetadataFiles() throws IOException {
    ensureCreated();
    Path namedDirectory = new Path(testDirectory, NAME);
    Path metadataDirectory = new Path(namedDirectory, ".metadata");
    Path propertiesFile = new Path(metadataDirectory, "descriptor.properties");
    Path schemaFile = new Path(metadataDirectory, "schema.avsc");
    Assert.assertTrue("Named directory should exist for name:" + NAME, fileSystem.exists(namedDirectory));
    Assert.assertTrue("Metadata directory should exist", fileSystem.exists(metadataDirectory));
    Assert.assertTrue("Descriptor properties file should exist", fileSystem.exists(propertiesFile));
    Assert.assertTrue("Descriptor schema file should exist", fileSystem.exists(schemaFile));
}

8. TestFileSystemMetadataProvider#testDeleteRemovesMetadataFiles()

View license
@Test
public void testDeleteRemovesMetadataFiles() throws IOException {
    testCreateMetadataFiles();
    DatasetDescriptor loaded = provider.load(NAME);
    Path namedDirectory = new Path(loaded.getLocation());
    Path metadataDirectory = new Path(namedDirectory, ".metadata");
    Path propertiesFile = new Path(metadataDirectory, "descriptor.properties");
    Path schemaFile = new Path(metadataDirectory, "schema.avsc");
    boolean result = provider.delete(NAME);
    Assert.assertTrue(result);
    Assert.assertFalse("Descriptor properties file should not exist", fileSystem.exists(propertiesFile));
    Assert.assertFalse("Descriptor schema file should not exist", fileSystem.exists(schemaFile));
    Assert.assertFalse("Metadata directory should not exist", fileSystem.exists(metadataDirectory));
    Assert.assertTrue("Named directory should still exist for name:" + NAME, fileSystem.exists(namedDirectory));
}

9. TestImpersonationQueries#createRecordReadersData()

Project: drill
Source File: TestImpersonationQueries.java
View license
private static void createRecordReadersData(String user, String group) throws Exception {
    // copy sequence file
    updateClient(user);
    Path localFile = new Path(FileUtils.getResourceAsFile("/sequencefiles/simple.seq").toURI().toString());
    Path dfsFile = new Path(getUserHome(user), "simple.seq");
    fs.copyFromLocalFile(localFile, dfsFile);
    fs.setOwner(dfsFile, user, group);
    fs.setPermission(dfsFile, new FsPermission((short) 0700));
    localFile = new Path(AvroTestUtil.generateSimplePrimitiveSchema_NoNullValues().getFilePath());
    dfsFile = new Path(getUserHome(user), "simple.avro");
    fs.copyFromLocalFile(localFile, dfsFile);
    fs.setOwner(dfsFile, user, group);
    fs.setPermission(dfsFile, new FsPermission((short) 0700));
}

10. FilteredCopyListingTest#testRunNoPattern()

Project: falcon
Source File: FilteredCopyListingTest.java
View license
@Test
public void testRunNoPattern() throws Exception {
    final URI uri = FileSystem.getLocal(new Configuration()).getUri();
    final String pathString = uri.toString();
    Path fileSystemPath = new Path(pathString);
    Path source = new Path(fileSystemPath.toString() + "///tmp/source");
    Path target = new Path(fileSystemPath.toString() + "///tmp/target");
    Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
    DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
    options.setSyncFolder(true);
    new FilteredCopyListing(new Configuration(), CREDENTIALS).buildListing(listingPath, options);
    verifyContents(listingPath, -1);
}

11. FilteredCopyListingTest#testRunStarPattern()

Project: falcon
Source File: FilteredCopyListingTest.java
View license
@Test
public void testRunStarPattern() throws Exception {
    final URI uri = FileSystem.getLocal(new Configuration()).getUri();
    final String pathString = uri.toString();
    Path fileSystemPath = new Path(pathString);
    Path source = new Path(fileSystemPath.toString() + "///tmp/source");
    Path target = new Path(fileSystemPath.toString() + "///tmp/target");
    Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
    DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
    options.setSyncFolder(true);
    Configuration configuration = new Configuration();
    configuration.set("falcon.include.path", "*/3/*");
    new FilteredCopyListing(configuration, CREDENTIALS).buildListing(listingPath, options);
    verifyContents(listingPath, 3);
}

12. FilteredCopyListingTest#testRunQuestionPattern()

Project: falcon
Source File: FilteredCopyListingTest.java
View license
@Test
public void testRunQuestionPattern() throws Exception {
    final URI uri = FileSystem.getLocal(new Configuration()).getUri();
    final String pathString = uri.toString();
    Path fileSystemPath = new Path(pathString);
    Path source = new Path(fileSystemPath.toString() + "///tmp/source");
    Path target = new Path(fileSystemPath.toString() + "///tmp/target");
    Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
    DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
    options.setSyncFolder(true);
    Configuration configuration = new Configuration();
    configuration.set("falcon.include.path", "*/3/?");
    new FilteredCopyListing(configuration, CREDENTIALS).buildListing(listingPath, options);
    verifyContents(listingPath, 2);
}

13. FilteredCopyListingTest#testRunRangePattern()

Project: falcon
Source File: FilteredCopyListingTest.java
View license
@Test
public void testRunRangePattern() throws Exception {
    final URI uri = FileSystem.getLocal(new Configuration()).getUri();
    final String pathString = uri.toString();
    Path fileSystemPath = new Path(pathString);
    Path source = new Path(fileSystemPath.toString() + "///tmp/source");
    Path target = new Path(fileSystemPath.toString() + "///tmp/target");
    Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
    DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
    options.setSyncFolder(true);
    Configuration configuration = new Configuration();
    configuration.set("falcon.include.path", "*/3/[47]");
    new FilteredCopyListing(configuration, CREDENTIALS).buildListing(listingPath, options);
    verifyContents(listingPath, 2);
}

14. FilteredCopyListingTest#testRunSpecificPattern()

Project: falcon
Source File: FilteredCopyListingTest.java
View license
@Test
public void testRunSpecificPattern() throws Exception {
    final URI uri = FileSystem.getLocal(new Configuration()).getUri();
    final String pathString = uri.toString();
    Path fileSystemPath = new Path(pathString);
    Path source = new Path(fileSystemPath.toString() + "///tmp/source");
    Path target = new Path(fileSystemPath.toString() + "///tmp/target");
    Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
    DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
    options.setSyncFolder(true);
    Configuration configuration = new Configuration();
    configuration.set("falcon.include.path", "*/3/40");
    new FilteredCopyListing(configuration, CREDENTIALS).buildListing(listingPath, options);
    verifyContents(listingPath, 1);
}

15. FilteredCopyListingTest#testRunListPattern()

Project: falcon
Source File: FilteredCopyListingTest.java
View license
@Test
public void testRunListPattern() throws Exception {
    final URI uri = FileSystem.getLocal(new Configuration()).getUri();
    final String pathString = uri.toString();
    Path fileSystemPath = new Path(pathString);
    Path source = new Path(fileSystemPath.toString() + "///tmp/source");
    Path target = new Path(fileSystemPath.toString() + "///tmp/target");
    Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
    DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
    options.setSyncFolder(true);
    Configuration configuration = new Configuration();
    configuration.set("falcon.include.path", "*/3/{4,7}");
    new FilteredCopyListing(configuration, CREDENTIALS).buildListing(listingPath, options);
    verifyContents(listingPath, 2);
}

16. SimpleDatasetsFinder#findDistinctDatasets()

Project: gobblin
Source File: SimpleDatasetsFinder.java
View license
/**
   * Create a dataset using {@link #inputDir} and {@link #destDir}.
   * Set dataset input path to be {@link #destDir} if {@link #recompactDatasets} is true.
   */
@Override
public Set<Dataset> findDistinctDatasets() throws IOException {
    Set<Dataset> datasets = Sets.newHashSet();
    Path inputPath = new Path(this.inputDir);
    Path inputLatePath = new Path(inputPath, MRCompactor.COMPACTION_LATE_DIR_SUFFIX);
    Path outputPath = new Path(this.destDir);
    Path outputLatePath = new Path(outputPath, MRCompactor.COMPACTION_LATE_DIR_SUFFIX);
    Dataset dataset = new Dataset.Builder().withPriority(this.getDatasetPriority(inputPath.getName())).withLateDataThresholdForRecompact(this.getDatasetRecompactThreshold(inputPath.getName())).withInputPath(this.recompactDatasets ? outputPath : inputPath).withInputLatePath(this.recompactDatasets ? outputLatePath : inputLatePath).withOutputPath(outputPath).withOutputLatePath(outputLatePath).withOutputTmpPath(new Path(this.tmpOutputDir)).build();
    datasets.add(dataset);
    return datasets;
}

17. FsRenameCommitStepTest#setUp()

Project: gobblin
Source File: FsRenameCommitStepTest.java
View license
@BeforeClass
public void setUp() throws IOException {
    this.fs = FileSystem.getLocal(new Configuration());
    this.fs.delete(new Path(ROOT_DIR), true);
    Path dir1 = new Path(ROOT_DIR, "dir1");
    Path dir2 = new Path(ROOT_DIR, "dir2");
    this.fs.mkdirs(dir1);
    this.fs.mkdirs(dir2);
    Path src = new Path(dir1, "file");
    Path dst = new Path(dir2, "file");
    this.fs.createNewFile(src);
    this.step = (FsRenameCommitStep) new FsRenameCommitStep.Builder<>().from(src).to(dst).withProps(new State()).build();
}

18. MultipleOutputFormat#getInputFileBasedOutputFileName()

Project: hadoop-20
Source File: MultipleOutputFormat.java
View license
/**
   * Generate the output file name based on a given name and the input file name. If
   * the map input file does not exist (i.e. this is not for a map-only job),
   * the given name is returned unchanged. If the config value for
   * "num.of.trailing.legs.to.use" is not set, or set 0 or negative, the given
   * name is returned unchanged. Otherwise, return a file name consisting of the
   * N trailing legs of the input file name where N is the config value for
   * "num.of.trailing.legs.to.use".
   * 
   * @param job
   *          the job config
   * @param name
   *          the output file name
   * @return the output file name based on the given name and the input file name.
   */
protected String getInputFileBasedOutputFileName(JobConf job, String name) {
    String infilepath = job.get("map.input.file");
    if (infilepath == null) {
        // if the map input file does not exist, then return the given name
        return name;
    }
    int numOfTrailingLegsToUse = job.getInt("mapred.outputformat.numOfTrailingLegs", 0);
    if (numOfTrailingLegsToUse <= 0) {
        return name;
    }
    Path infile = new Path(infilepath);
    Path parent = infile.getParent();
    String midName = infile.getName();
    Path outPath = new Path(midName);
    for (int i = 1; i < numOfTrailingLegsToUse; i++) {
        if (parent == null)
            break;
        midName = parent.getName();
        if (midName.length() == 0)
            break;
        parent = parent.getParent();
        outPath = new Path(midName, outPath);
    }
    return outPath.toString();
}
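
As a rough standalone illustration of the trailing-legs loop above (the input path is made up and the JobConf lookup is replaced by a constant of 2):

import org.apache.hadoop.fs.Path;

public class TrailingLegsSketch {
    public static void main(String[] args) {
        // Hypothetical map input file; with 2 trailing legs the result keeps "2012-01-01/part-00000".
        Path infile = new Path("/logs/clicks/2012-01-01/part-00000");
        int numOfTrailingLegsToUse = 2;
        Path parent = infile.getParent();
        String midName = infile.getName();
        Path outPath = new Path(midName);
        for (int i = 1; i < numOfTrailingLegsToUse; i++) {
            if (parent == null)
                break;
            midName = parent.getName();
            if (midName.length() == 0)
                break;
            parent = parent.getParent();
            outPath = new Path(midName, outPath);
        }
        System.out.println(outPath);  // 2012-01-01/part-00000
    }
}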

19. TestDistributedCache#setUp()

Project: hadoop-20
Source File: TestDistributedCache.java
View license
/**
   * @see TestCase#setUp()
   */
@Override
protected void setUp() throws IOException {
    conf = new Configuration();
    conf.setLong("local.cache.size", LOCAL_CACHE_LIMIT);
    conf.set("mapred.local.dir", MAPRED_LOCAL_DIR);
    conf.setLong("local.cache.numbersubdir", LOCAL_CACHE_FILES);
    FileUtil.fullyDelete(new File(TEST_CACHE_BASE_DIR));
    FileUtil.fullyDelete(new File(TEST_ROOT_DIR));
    localfs = FileSystem.get(LOCAL_FS, conf);
    firstCacheFile = new Path(TEST_ROOT_DIR + "/firstcachefile");
    secondCacheFile = new Path(TEST_ROOT_DIR + "/secondcachefile");
    thirdCacheFile = new Path(TEST_ROOT_DIR + "/thirdcachefile");
    fourthCacheFile = new Path(TEST_ROOT_DIR + "/fourthcachefile");
    createTempFile(localfs, firstCacheFile, 4 * 1024);
    createTempFile(localfs, secondCacheFile, 2 * 1024);
    createTempFile(localfs, thirdCacheFile, 1);
    createTempFile(localfs, fourthCacheFile, 1);
}

20. TestHarFileSystem#testSpaces()

Project: hadoop-20
Source File: TestHarFileSystem.java
View license
@Test
public void testSpaces() throws Exception {
    fs.delete(archivePath, true);
    Configuration conf = mapred.createJobConf();
    HadoopArchives har = new HadoopArchives(conf);
    String[] args = new String[6];
    args[0] = "-archiveName";
    args[1] = "foo bar.har";
    args[2] = "-p";
    args[3] = fs.getHomeDirectory().toString();
    args[4] = "test";
    args[5] = archivePath.toString();
    int ret = ToolRunner.run(har, args);
    assertTrue("failed test", ret == 0);
    Path finalPath = new Path(archivePath, "foo bar.har");
    Path fsPath = new Path(inputPath.toUri().getPath());
    Path filePath = new Path(finalPath, "test");
    // make it a har path
    Path harPath = new Path("har://" + filePath.toUri().getPath());
    FileSystem harFs = harPath.getFileSystem(conf);
    FileStatus[] statuses = harFs.listStatus(finalPath);
}

21. TestDecommission#setup()

Project: hadoop-20
Source File: TestDecommission.java
View license
@Before
public void setup() throws IOException {
    conf = new Configuration();
    // Set up the hosts/exclude files.
    localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir, "build/test/data/work-dir/decommission/");
    hostsFile = new Path(dir, "hosts");
    excludeFile = new Path(dir, "exclude");
    cleanFile(hostsFile);
    cleanFile(excludeFile);
    // Setup conf
    conf.setBoolean("dfs.replication.considerLoad", false);
    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
    conf.setInt("heartbeat.recheck.interval", 2000);
    conf.setInt("dfs.heartbeat.interval", HEARTBEAT_INTERVAL);
    conf.setInt("dfs.replication.pending.timeout.sec", 4);
    writeConfigFile(excludeFile, null);
}

22. MultipleOutputFormat#getInputFileBasedOutputFileName()

View license
/**
   * Generate the output file name based on a given name and the input file name. If
   * the map input file does not exist (i.e. this is not for a map-only job),
   * the given name is returned unchanged. If the config value for
   * "num.of.trailing.legs.to.use" is not set, or set 0 or negative, the given
   * name is returned unchanged. Otherwise, return a file name consisting of the
   * N trailing legs of the input file name where N is the config value for
   * "num.of.trailing.legs.to.use".
   * 
   * @param job
   *          the job config
   * @param name
   *          the output file name
   * @return the output file name based on the given name and the input file name.
   */
protected String getInputFileBasedOutputFileName(JobConf job, String name) {
    String infilepath = job.get("map.input.file");
    if (infilepath == null) {
        // if the map input file does not exist, then return the given name
        return name;
    }
    int numOfTrailingLegsToUse = job.getInt("mapred.outputformat.numOfTrailingLegs", 0);
    if (numOfTrailingLegsToUse <= 0) {
        return name;
    }
    Path infile = new Path(infilepath);
    Path parent = infile.getParent();
    String midName = infile.getName();
    Path outPath = new Path(midName);
    for (int i = 1; i < numOfTrailingLegsToUse; i++) {
        if (parent == null)
            break;
        midName = parent.getName();
        if (midName.length() == 0)
            break;
        parent = parent.getParent();
        outPath = new Path(midName, outPath);
    }
    return outPath.toString();
}

23. MultipleOutputFormat#getInputFileBasedOutputFileName()

View license
/**
   * Generate the output file name based on a given name and the input file name. If
   * the {@link JobContext#MAP_INPUT_FILE} does not exist (i.e. this is not for a map-only job),
   * the given name is returned unchanged. If the config value for
   * "num.of.trailing.legs.to.use" is not set, or set 0 or negative, the given
   * name is returned unchanged. Otherwise, return a file name consisting of the
   * N trailing legs of the input file name where N is the config value for
   * "num.of.trailing.legs.to.use".
   * 
   * @param job
   *          the job config
   * @param name
   *          the output file name
   * @return the output file name based on the given name and the input file name.
   */
protected String getInputFileBasedOutputFileName(JobConf job, String name) {
    String infilepath = job.get(JobContext.MAP_INPUT_FILE);
    if (infilepath == null) {
        // then return the given name
        return name;
    }
    int numOfTrailingLegsToUse = job.getInt("mapred.outputformat.numOfTrailingLegs", 0);
    if (numOfTrailingLegsToUse <= 0) {
        return name;
    }
    Path infile = new Path(infilepath);
    Path parent = infile.getParent();
    String midName = infile.getName();
    Path outPath = new Path(midName);
    for (int i = 1; i < numOfTrailingLegsToUse; i++) {
        if (parent == null)
            break;
        midName = parent.getName();
        if (midName.length() == 0)
            break;
        parent = parent.getParent();
        outPath = new Path(midName, outPath);
    }
    return outPath.toString();
}

24. IndexLoadIncrementalHFile#splitStoreFile()

View license
protected List<LoadQueueItem> splitStoreFile(final LoadQueueItem item, final HTable table, byte[] startKey, byte[] splitKey) throws IOException {
    final Path hfilePath = item.hfilePath;
    // We use a '_' prefix which is ignored when walking directory trees
    // above.
    final Path tmpDir = new Path(item.hfilePath.getParent(), "_tmp");
    LOG.info("HFile at " + hfilePath + " no longer fits inside a single " + "region. Splitting...");
    String uniqueName = getUniqueName(table.getTableName());
    HColumnDescriptor familyDesc = table.getTableDescriptor().getFamily(item.family);
    Path botOut = new Path(tmpDir, uniqueName + ".bottom");
    Path topOut = new Path(tmpDir, uniqueName + ".top");
    splitStoreFile(getConf(), hfilePath, familyDesc, splitKey, botOut, topOut);
    // Add these back at the *front* of the queue, so there's a lower
    // chance that the region will just split again before we get there.
    List<LoadQueueItem> lqis = new ArrayList<LoadQueueItem>(2);
    lqis.add(new LoadQueueItem(item.family, botOut));
    lqis.add(new LoadQueueItem(item.family, topOut));
    LOG.info("Successfully split into new HFiles " + botOut + " and " + topOut);
    return lqis;
}

25. TestLoadIncrementalHFiles#testSplitStoreFile()

View license
@Test
public void testSplitStoreFile() throws IOException {
    Path dir = util.getDataTestDir("testSplitHFile");
    FileSystem fs = util.getTestFileSystem();
    Path testIn = new Path(dir, "testhfile");
    HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
    createHFile(util.getConfiguration(), fs, testIn, FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);
    Path bottomOut = new Path(dir, "bottom.out");
    Path topOut = new Path(dir, "top.out");
    LoadIncrementalHFiles.splitStoreFile(util.getConfiguration(), testIn, familyDesc, Bytes.toBytes("ggg"), bottomOut, topOut);
    int rowCount = verifyHFile(bottomOut);
    rowCount += verifyHFile(topOut);
    assertEquals(1000, rowCount);
}

26. TestCatalogJanitor#createReferences()

Project: hindex
Source File: TestCatalogJanitor.java
View license
/**
   * @param services Master services instance.
   * @param htd
   * @param parent
   * @param daughter
   * @param midkey
   * @param top True if we are to write a 'top' reference.
   * @return Path to reference we created.
   * @throws IOException
   */
private Path createReferences(final MasterServices services, final HTableDescriptor htd, final HRegionInfo parent, final HRegionInfo daughter, final byte[] midkey, final boolean top) throws IOException {
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
    Path storedir = Store.getStoreHomedir(tabledir, daughter.getEncodedName(), htd.getColumnFamilies()[0].getName());
    Reference ref = new Reference(midkey, top ? Reference.Range.top : Reference.Range.bottom);
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    ref.write(fs, p);
    return p;
}

27. TestFSTableDescriptors#testSequenceidAdvancesOnTableInfo()

Project: hindex
Source File: TestFSTableDescriptors.java
View license
@Test
public void testSequenceidAdvancesOnTableInfo() throws IOException {
    Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
    HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo");
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
    int i0 = FSTableDescriptors.getTableInfoSequenceid(p0);
    Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
    // Assert we cleaned up the old file.
    assertTrue(!fs.exists(p0));
    int i1 = FSTableDescriptors.getTableInfoSequenceid(p1);
    assertTrue(i1 == i0 + 1);
    Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
    // Assert we cleaned up the old file.
    assertTrue(!fs.exists(p1));
    int i2 = FSTableDescriptors.getTableInfoSequenceid(p2);
    assertTrue(i2 == i1 + 1);
}

28. TestFSTableDescriptors#testFormatTableInfoSequenceId()

Project: hindex
Source File: TestFSTableDescriptors.java
View license
@Test
public void testFormatTableInfoSequenceId() {
    Path p0 = assertWriteAndReadSequenceid(0);
    // Assert p0 has format we expect.
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
        sb.append("0");
    }
    assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(), p0.getName());
    // Check a few more.
    Path p2 = assertWriteAndReadSequenceid(2);
    Path p10000 = assertWriteAndReadSequenceid(10000);
    // Get a .tableinfo that has no sequenceid suffix.
    Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_NAME);
    FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
    FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
    FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
    FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
    FSTableDescriptors.FileStatusFileNameComparator comparator = new FSTableDescriptors.FileStatusFileNameComparator();
    assertTrue(comparator.compare(fs, fs0) > 0);
    assertTrue(comparator.compare(fs0, fs2) > 0);
    assertTrue(comparator.compare(fs2, fs10000) > 0);
}

29. TestTezCommonUtils#testTezDAGRecoveryStagingPath()

View license
// Testing DAG specific recovery path staging dir
@Test
public void testTezDAGRecoveryStagingPath() throws Exception {
    String strAppId = "testAppId";
    Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
    Path recoveryPath = TezCommonUtils.getRecoveryPath(stageDir, conf);
    Path recoveryStageDir = TezCommonUtils.getAttemptRecoveryPath(recoveryPath, 2);
    Path dagRecoveryPathj = TezCommonUtils.getDAGRecoveryPath(recoveryStageDir, "dag_123");
    String expectedDir = RESOLVED_STAGE_DIR + File.separatorChar + TezCommonUtils.TEZ_SYSTEM_SUB_DIR + File.separatorChar + strAppId + File.separator + TezConfiguration.DAG_RECOVERY_DATA_DIR_NAME + File.separator + "2" + File.separator + "dag_123" + TezConfiguration.DAG_RECOVERY_RECOVER_FILE_SUFFIX;
    Assert.assertEquals(dagRecoveryPathj.toString(), expectedDir);
}

30. TestTezCommonUtils#testTezSummaryRecoveryStagingPath()

View license
// Testing Summary recovery path staging dir
@Test
public void testTezSummaryRecoveryStagingPath() throws Exception {
    String strAppId = "testAppId";
    Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
    Path recoveryPath = TezCommonUtils.getRecoveryPath(stageDir, conf);
    Path recoveryStageDir = TezCommonUtils.getAttemptRecoveryPath(recoveryPath, 2);
    Path summaryRecoveryPathj = TezCommonUtils.getSummaryRecoveryPath(recoveryStageDir);
    String expectedDir = RESOLVED_STAGE_DIR + File.separatorChar + TezCommonUtils.TEZ_SYSTEM_SUB_DIR + File.separatorChar + strAppId + File.separator + TezConfiguration.DAG_RECOVERY_DATA_DIR_NAME + File.separator + "2" + File.separator + TezConfiguration.DAG_RECOVERY_SUMMARY_FILE_SUFFIX;
    Assert.assertEquals(summaryRecoveryPathj.toString(), expectedDir);
}

31. TestCSVFileReader#createCSVFiles()

Project: kite
Source File: TestCSVFileReader.java
View license
@BeforeClass
public static void createCSVFiles() throws IOException {
    localfs = LocalFileSystem.getInstance();
    csvFile = new Path("target/temp.csv");
    reorderedFile = new Path("target/reordered.csv");
    tsvFile = new Path("target/temp.tsv");
    validatorFile = new Path("target/validator.csv");
    FSDataOutputStream out = localfs.create(csvFile, true);
    out.writeBytes(CSV_CONTENT);
    out.close();
    out = localfs.create(reorderedFile, true);
    out.writeBytes(REORDERED_CSV_CONTENT);
    out.close();
    out = localfs.create(validatorFile, true);
    out.writeBytes(VALIDATOR_CSV_CONTENT);
    out.close();
    out = localfs.create(tsvFile, true);
    out.writeBytes(TSV_CONTENT);
    out.close();
}

32. TestFileSystemMetadataProvider#testCreateMetadataFiles()

View license
@Test
public void testCreateMetadataFiles() throws IOException {
    ensureCreated();
    Path namedDirectory = new Path(testDirectory, new Path(NAMESPACE, NAME));
    Path metadataDirectory = new Path(namedDirectory, ".metadata");
    Path propertiesFile = new Path(metadataDirectory, "descriptor.properties");
    Path schemaDirectory = new Path(metadataDirectory, "schemas");
    Assert.assertTrue("Named directory should exist for name:" + NAME, fileSystem.exists(namedDirectory));
    Assert.assertTrue("Metadata directory should exist", fileSystem.exists(metadataDirectory));
    Assert.assertTrue("Descriptor properties file should exist", fileSystem.exists(propertiesFile));
    Assert.assertTrue("Descriptor schema directory should exist", fileSystem.exists(schemaDirectory));
}

33. TestFileSystemMetadataProvider#testDeleteRemovesMetadataFiles()

View license
@Test
public void testDeleteRemovesMetadataFiles() throws IOException {
    testCreateMetadataFiles();
    DatasetDescriptor loaded = provider.load(NAMESPACE, NAME);
    Path namedDirectory = new Path(loaded.getLocation());
    Path metadataDirectory = new Path(namedDirectory, ".metadata");
    Path propertiesFile = new Path(metadataDirectory, "descriptor.properties");
    Path schemaDirectory = new Path(metadataDirectory, "schemas");
    boolean result = provider.delete(NAMESPACE, NAME);
    Assert.assertTrue(result);
    Assert.assertFalse("Descriptor properties file should not exist", fileSystem.exists(propertiesFile));
    Assert.assertFalse("Descriptor schema directory should not exist", fileSystem.exists(schemaDirectory));
    Assert.assertFalse("Metadata directory should not exist", fileSystem.exists(metadataDirectory));
    Assert.assertTrue("Named directory should still exist for name:" + NAME, fileSystem.exists(namedDirectory));
}

34. TestFileSystemMetadataProvider#testUpdatePreviousFormat()

View license
@Test
public void testUpdatePreviousFormat() throws IOException {
    useOldRepositoryFormat();
    DatasetDescriptor oldFormatDescriptor = provider.load(NAMESPACE, NAME);
    Path namedDirectory = new Path(oldFormatDescriptor.getLocation());
    Path metadataDirectory = new Path(namedDirectory, ".metadata");
    Path schemaDirectory = new Path(metadataDirectory, "schemas");
    Path newSchemaLocation = new Path(schemaDirectory, "1.avsc");
    // Performing an update against a dataset in the old location should bring it
    // into the new location.
    DatasetDescriptor updated = new DatasetDescriptor.Builder(oldFormatDescriptor).build();
    provider.update(NAMESPACE, NAME, updated);
    Assert.assertEquals(testDescriptor.getSchema(), oldFormatDescriptor.getSchema());
    Assert.assertTrue("Schema should exist at the new location.", fileSystem.exists(newSchemaLocation));
}

35. DataProviderFactoryTest#testGetVectorDataProviderForOverwriteWhenExists()

Project: mrgeo
Source File: DataProviderFactoryTest.java
View license
@Test
@Category(UnitTest.class)
public void testGetVectorDataProviderForOverwriteWhenExists() throws IOException {
    // Using an existing resource
    Path tmpDir = HadoopFileUtils.createUniqueTmp();
    Path testTsvPath = new Path(test_tsv);
    HadoopFileUtils.copyToHdfs(testTsvPath.getParent(), tmpDir, test_tsv_filename);
    HadoopFileUtils.copyToHdfs(testTsvPath.getParent(), tmpDir, test_tsv_columns_filename);
    Path hdfsTsvPath = new Path(tmpDir, test_tsv_filename);
    Path hdfsColumnsPath = new Path(tmpDir, test_tsv_columns_filename);
    try {
        VectorDataProvider dp = DataProviderFactory.getVectorDataProvider(hdfsTsvPath.toString(), AccessMode.OVERWRITE, providerProperties);
        Assert.assertNotNull(dp);
        Assert.assertFalse(HadoopFileUtils.exists(hdfsTsvPath));
        Assert.assertFalse(HadoopFileUtils.exists(hdfsColumnsPath));
    } finally {
        HadoopFileUtils.delete(tmpDir);
    }
}

36. TestTezCommonUtils#testTezDAGRecoveryStagingPath()

Project: tez
Source File: TestTezCommonUtils.java
View license
// Testing DAG specific recovery path staging dir
@Test(timeout = 5000)
public void testTezDAGRecoveryStagingPath() throws Exception {
    String strAppId = "testAppId";
    Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
    Path recoveryPath = TezCommonUtils.getRecoveryPath(stageDir, conf);
    Path recoveryStageDir = TezCommonUtils.getAttemptRecoveryPath(recoveryPath, 2);
    Path dagRecoveryPathj = TezCommonUtils.getDAGRecoveryPath(recoveryStageDir, "dag_123");
    String expectedDir = RESOLVED_STAGE_DIR + Path.SEPARATOR + TezCommonUtils.TEZ_SYSTEM_SUB_DIR + Path.SEPARATOR + strAppId + Path.SEPARATOR + TezConstants.DAG_RECOVERY_DATA_DIR_NAME + Path.SEPARATOR + "2" + Path.SEPARATOR + "dag_123" + TezConstants.DAG_RECOVERY_RECOVER_FILE_SUFFIX;
    Assert.assertEquals(expectedDir, dagRecoveryPathj.toString());
}

37. TestTezCommonUtils#testTezSummaryRecoveryStagingPath()

Project: tez
Source File: TestTezCommonUtils.java
View license
// Testing Summary recovery path staging dir
@Test(timeout = 5000)
public void testTezSummaryRecoveryStagingPath() throws Exception {
    String strAppId = "testAppId";
    Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
    Path recoveryPath = TezCommonUtils.getRecoveryPath(stageDir, conf);
    Path recoveryStageDir = TezCommonUtils.getAttemptRecoveryPath(recoveryPath, 2);
    Path summaryRecoveryPathj = TezCommonUtils.getSummaryRecoveryPath(recoveryStageDir);
    String expectedDir = RESOLVED_STAGE_DIR + Path.SEPARATOR + TezCommonUtils.TEZ_SYSTEM_SUB_DIR + Path.SEPARATOR + strAppId + Path.SEPARATOR + TezConstants.DAG_RECOVERY_DATA_DIR_NAME + Path.SEPARATOR + "2" + Path.SEPARATOR + TezConstants.DAG_RECOVERY_SUMMARY_FILE_SUFFIX;
    Assert.assertEquals(expectedDir, summaryRecoveryPathj.toString());
}

38. HadoopFileCacheRepository#computeCachePath()

View license
private Path computeCachePath(Path file) {
    assert repository != null;
    String directoryName;
    Path parent = file.getParent();
    if (parent == null) {
        //$NON-NLS-1$
        directoryName = String.format("%08x", 0);
    } else {
        //$NON-NLS-1$
        directoryName = String.format("%08x", parent.toString().hashCode());
    }
    Path directory = new Path(repository, directoryName);
    Path target = new Path(directory, file.getName());
    return target;
}
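
A minimal standalone sketch of the hashing scheme above, with a made-up repository root and source file; the cache subdirectory name is the 8-hex-digit hash of the parent path string:

import org.apache.hadoop.fs.Path;

public class CachePathSketch {
    public static void main(String[] args) {
        // Hypothetical repository root and file to cache.
        Path repository = new Path("/tmp/cache-repo");
        Path file = new Path("/datasets/2016/01/records.tsv");
        Path parent = file.getParent();
        String directoryName = (parent == null)
                ? String.format("%08x", 0)
                : String.format("%08x", parent.toString().hashCode());
        Path target = new Path(new Path(repository, directoryName), file.getName());
        System.out.println(target);  // e.g. /tmp/cache-repo/1a2b3c4d/records.tsv
    }
}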

39. TemporaryOutputRetriever#truncate()

View license
@Override
public void truncate(TemporaryOutputDescription description, TestContext context) throws IOException {
    //$NON-NLS-1$
    LOG.debug("Deleting output directory: {}", description);
    VariableTable variables = createVariables(context);
    Configuration config = configurations.newInstance();
    FileSystem fs = FileSystem.get(config);
    String resolved = variables.parse(description.getPathPrefix(), false);
    Path path = new Path(resolved);
    Path output = path.getParent();
    Path target;
    if (output == null) {
        LOG.warn(MessageFormat.format(//$NON-NLS-1$
        Messages.getString("TemporaryOutputRetriever.warnDeleteBaseDirectory"), path));
        target = fs.makeQualified(path);
    } else {
        //$NON-NLS-1$
        LOG.debug("output directory will be deleted: {}", output);
        target = fs.makeQualified(output);
    }
    TemporaryInputPreparator.delete(fs, target);
}

40. WindGateHadoopGetTest#multiple()

View license
/**
     * Gets multiple files.
     * @throws Exception if failed
     */
@Test
public void multiple() throws Exception {
    Path path1 = new Path(PREFIX, "testing-1");
    Path path2 = new Path(PREFIX, "testing-2");
    Path path3 = new Path(PREFIX, "testing-3");
    put(path1, "Hello1, world!");
    put(path2, "Hello2, world!");
    put(path3, "Hello3, world!");
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    int result = new WindGateHadoopGet(conf).execute(buffer, path1.toString(), path2.toString(), path3.toString());
    assertThat(result, is(0));
    Map<String, String> contents = get(buffer.toByteArray());
    assertThat(contents.size(), is(3));
    assertThat(contents.get("testing-1"), is("Hello1, world!"));
    assertThat(contents.get("testing-2"), is("Hello2, world!"));
    assertThat(contents.get("testing-3"), is("Hello3, world!"));
}

41. WindGateHadoopGetTest#glob()

View license
/**
     * Gets multiple files using glob.
     * @throws Exception if failed
     */
@Test
public void glob() throws Exception {
    Path path1 = new Path(PREFIX, "testing-1");
    Path path2 = new Path(PREFIX, "testing-2");
    Path path3 = new Path(PREFIX, "testing-3");
    put(path1, "Hello1, world!");
    put(path2, "Hello2, world!");
    put(path3, "Hello3, world!");
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    int result = new WindGateHadoopGet(conf).execute(buffer, new Path(PREFIX, "testing-*").toString());
    assertThat(result, is(0));
    Map<String, String> contents = get(buffer.toByteArray());
    assertThat(contents.size(), is(3));
    assertThat(contents.get("testing-1"), is("Hello1, world!"));
    assertThat(contents.get("testing-2"), is("Hello2, world!"));
    assertThat(contents.get("testing-3"), is("Hello3, world!"));
}

42. TestCSVFileReader#createCSVFiles()

Project: cdk
Source File: TestCSVFileReader.java
View license
@BeforeClass
public static void createCSVFiles() throws IOException {
    localfs = FileSystem.getLocal(new Configuration());
    csvFile = new Path("target/temp.csv");
    tsvFile = new Path("target/temp.tsv");
    validatorFile = new Path("target/validator.csv");
    FSDataOutputStream out = localfs.create(csvFile, true);
    out.writeBytes(CSV_CONTENT);
    out.close();
    out = localfs.create(validatorFile, true);
    out.writeBytes(VALIDATOR_CSV_CONTENT);
    out.close();
    out = localfs.create(tsvFile, true);
    out.writeBytes(TSV_CONTENT);
    out.close();
}

43. PailInputSplit#setRelPath()

Project: dfs-datastores
Source File: PailInputSplit.java
View license
private void setRelPath(FileSystem fs, String root) {
    Path filePath = super.getPath();
    filePath = filePath.makeQualified(fs);
    Path rootPath = new Path(root).makeQualified(fs);
    List<String> dirs = new LinkedList<String>();
    Path curr = filePath.getParent();
    while (!curr.equals(rootPath)) {
        dirs.add(0, curr.getName());
        curr = curr.getParent();
        if (curr == null)
            throw new IllegalArgumentException(filePath.toString() + " is not a subpath of " + rootPath.toString());
    }
    _relPath = Utils.join(dirs, "/");
}
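
A rough standalone sketch of the walk above, with made-up paths and without the FileSystem qualification (makeQualified needs a live FileSystem, so plain absolute paths are compared instead); Utils.join is replaced by String.join:

import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.fs.Path;

public class RelPathSketch {
    public static void main(String[] args) {
        // Hypothetical pail root and a file two levels below it.
        Path rootPath = new Path("/pail/root");
        Path filePath = new Path("/pail/root/2016/01/part-00000");
        List<String> dirs = new LinkedList<String>();
        Path curr = filePath.getParent();
        while (!curr.equals(rootPath)) {
            dirs.add(0, curr.getName());
            curr = curr.getParent();
            if (curr == null)
                throw new IllegalArgumentException(filePath + " is not a subpath of " + rootPath);
        }
        System.out.println(String.join("/", dirs));  // 2016/01
    }
}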

44. PailTap#commitResource()

Project: dfs-datastores
Source File: PailTap.java
View license
@Override
public boolean commitResource(JobConf conf) throws IOException {
    Pail p = Pail.create(_pailRoot, ((PailScheme) getScheme()).getSpec(), false);
    FileSystem fs = p.getFileSystem();
    Path tmpPath = new Path(_pailRoot, "_temporary");
    if (fs.exists(tmpPath)) {
        LOG.info("Deleting _temporary directory left by Hadoop job: " + tmpPath.toString());
        fs.delete(tmpPath, true);
    }
    Path tmp2Path = new Path(_pailRoot, "_temporary2");
    if (fs.exists(tmp2Path)) {
        LOG.info("Deleting _temporary2 directory: " + tmp2Path.toString());
        fs.delete(tmp2Path, true);
    }
    Path logPath = new Path(_pailRoot, "_logs");
    if (fs.exists(logPath)) {
        LOG.info("Deleting _logs directory left by Hadoop job: " + logPath.toString());
        fs.delete(logPath, true);
    }
    return true;
}

45. HiveDRTool#cleanTempFiles()

Project: falcon
Source File: HiveDRTool.java
View license
private void cleanTempFiles() {
    Path eventsDirPath = new Path(FileUtils.DEFAULT_EVENT_STORE_PATH, inputOptions.getJobName());
    Path metaFilePath = new Path(eventsDirPath.toString(), inputOptions.getJobName() + META_PATH_FILE_SUFFIX);
    Path eventsFilePath = new Path(eventsDirPath.toString(), inputOptions.getJobName() + ".id");
    try {
        if (jobFS.exists(metaFilePath)) {
            jobFS.delete(metaFilePath, true);
        }
        if (jobFS.exists(eventsFilePath)) {
            jobFS.delete(eventsFilePath, true);
        }
    } catch (IOException e) {
        LOG.error("Deleting Temp files failed", e);
    }
}

46. FileSystemStorage#fileSystemEvictor()

Project: falcon
Source File: FileSystemStorage.java
View license
private void fileSystemEvictor(String feedPath, String retentionLimit, TimeZone timeZone, Path logFilePath) throws IOException, ELException, FalconException {
    Path normalizedPath = new Path(feedPath);
    FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(normalizedPath.toUri());
    feedPath = normalizedPath.toUri().getPath();
    LOG.info("Normalized path: {}", feedPath);
    Pair<Date, Date> range = EvictionHelper.getDateRange(retentionLimit);
    List<Path> toBeDeleted = discoverInstanceToDelete(feedPath, timeZone, range.first, fs);
    if (toBeDeleted.isEmpty()) {
        LOG.info("No instances to delete.");
        return;
    }
    DateFormat dateFormat = new SimpleDateFormat(FeedHelper.FORMAT);
    dateFormat.setTimeZone(timeZone);
    Path feedBasePath = fs.makeQualified(FeedHelper.getFeedBasePath(feedPath));
    for (Path path : toBeDeleted) {
        deleteInstance(fs, path, feedBasePath);
        Date date = FeedHelper.getDate(feedPath, new Path(path.toUri().getPath()), timeZone);
        instanceDates.append(dateFormat.format(date)).append(',');
        instancePaths.append(path).append(EvictedInstanceSerDe.INSTANCEPATH_SEPARATOR);
    }
}

47. UnitTestContext#prepare()

Project: falcon
Source File: UnitTestContext.java
View license
protected void prepare(String workflow) throws Exception {
    mkdir(fs, new Path("/falcon"), new FsPermission((short) 511));
    Path wfParent = new Path("/falcon/test");
    fs.delete(wfParent, true);
    Path wfPath = new Path(wfParent, "workflow");
    mkdir(fs, wfPath);
    mkdir(fs, new Path("/falcon/test/workflow/lib"));
    fs.copyFromLocalFile(false, true, new Path(TestContext.class.getResource("/" + workflow).getPath()), new Path(wfPath, "workflow.xml"));
    mkdir(fs, new Path(wfParent, "input/2012/04/20/00"));
    mkdir(fs, new Path(wfParent, "input/2012/04/21/00"));
    Path outPath = new Path(wfParent, "output");
    mkdir(fs, outPath, new FsPermission((short) 511));
}

48. CopyableFile#builder()

Project: gobblin
Source File: CopyableFile.java
View license
/**
   * Get a {@link CopyableFile.Builder}.
   *
   * @param originFs {@link FileSystem} where original file exists.
   * @param origin {@link FileStatus} of the original file.
   * @param datasetRoot Value of {@link CopyableDataset#datasetRoot} of the dataset creating this {@link CopyableFile}.
   * @param copyConfiguration {@link CopyConfiguration} for the copy job.
   * @return a {@link CopyableFile.Builder}.
   * @deprecated use {@link #fromOriginAndDestination}. This method was changed to remove reliance on dataset root
   *             which is not standard of all datasets. The old functionality on inferring destinations cannot be
   *             achieved without dataset root and common dataset root, so this is an approximation. Copyable datasets
   *             should compute file destinations themselves.
   */
@Deprecated
public static Builder builder(FileSystem originFs, FileStatus origin, Path datasetRoot, CopyConfiguration copyConfiguration) {
    Path relativePath = PathUtils.relativizePath(origin.getPath(), datasetRoot);
    Path targetRoot = new Path(copyConfiguration.getPublishDir(), datasetRoot.getName());
    Path targetPath = new Path(targetRoot, relativePath);
    return _hiddenBuilder().originFS(originFs).origin(origin).destination(targetPath).preserve(copyConfiguration.getPreserve()).configuration(copyConfiguration);
}
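
A rough standalone sketch of the destination arithmetic above, with made-up paths; PathUtils.relativizePath is a Gobblin helper, so the relative part is written out by hand here:

import org.apache.hadoop.fs.Path;

public class CopyDestinationSketch {
    public static void main(String[] args) {
        // Hypothetical dataset root, publish directory, and the origin file's
        // location relative to the dataset root.
        Path datasetRoot = new Path("/data/tracking/events");
        Path publishDir = new Path("/publish/staging");
        Path relativePath = new Path("hourly/2016/01/01/part-00000.avro");
        Path targetRoot = new Path(publishDir, datasetRoot.getName());
        Path targetPath = new Path(targetRoot, relativePath);
        System.out.println(targetPath);  // /publish/staging/events/hourly/2016/01/01/part-00000.avro
    }
}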

49. FsStateStore#createAlias()

Project: gobblin
Source File: FsStateStore.java
View license
@Override
public void createAlias(String storeName, String original, String alias) throws IOException {
    Path originalTablePath = new Path(new Path(this.storeRootDir, storeName), original);
    if (!this.fs.exists(originalTablePath)) {
        throw new IOException(String.format("State file %s does not exist for table %s", originalTablePath, original));
    }
    Path aliasTablePath = new Path(new Path(this.storeRootDir, storeName), alias);
    Path tmpAliasTablePath = new Path(aliasTablePath.getParent(), new Path(TMP_FILE_PREFIX, aliasTablePath.getName()));
    // Make a copy of the original table as a work-around because
    // Hadoop version 1.2.1 has no support for symlink yet.
    HadoopUtils.copyFile(this.fs, originalTablePath, this.fs, aliasTablePath, tmpAliasTablePath, true, this.conf);
}

50. TestReleaseManager#setUp()

Project: hadoop-20
Source File: TestReleaseManager.java
View license
@Override
protected void setUp() throws Exception {
    formatter = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss");
    conf = new Configuration();
    conf.set("mapred.release.dir", RELEASE_DIR);
    conf.set("mapred.release.working.dir", WORKING_DIR);
    conf.setLong("mapred.release.dir.cleanInterval", CLEAN_INTERVAL);
    conf.setLong("mapred.release.dir.cleanThreshold", CLEAN_THRESHOLD);
    fs = FileSystem.newInstanceLocal(conf);
    releasePath = new Path(RELEASE_DIR);
    workingPath = new Path(WORKING_DIR);
    tagFileName = TAG_FILE_NAME;
    releaseTagPath = new Path(releasePath, tagFileName);
    touchTag();
    crReleaseManager = new CoronaReleaseManager(conf);
    crReleaseManager.start();
}

51. TestSimulatorEndToEnd#testMain()

View license
@Test
public void testMain() throws Exception {
    final Configuration conf = new Configuration();
    final FileSystem lfs = FileSystem.getLocal(conf);
    final Path rootInputDir = new Path(System.getProperty("src.test.data", "data")).makeQualified(lfs);
    final Path traceFile = new Path(rootInputDir, "19-jobs.trace.json.gz");
    final Path topologyFile = new Path(rootInputDir, "19-jobs.topology.json.gz");
    LOG.info("traceFile = " + traceFile.toString() + " topology = " + topologyFile.toString());
    int numJobs = getNumberJobs(traceFile, conf);
    int nTrackers = getNumberTaskTrackers(topologyFile, conf);
    MockSimulatorEngine mockMumak = new MockSimulatorEngine(numJobs, nTrackers);
    String[] args = { traceFile.toString(), topologyFile.toString() };
    int res = ToolRunner.run(new Configuration(), mockMumak, args);
    Assert.assertEquals(res, 0);
}

52. FTPFileSystem#rename()

Project: hadoop-20
Source File: FTPFileSystem.java
View license
/**
   * Convenience method, so that we don't open a new connection when using this
   * method from within another method. Otherwise every API invocation incurs
   * the overhead of opening/closing a TCP connection.
   * 
   * @param client
   * @param src
   * @param dst
   * @return
   * @throws IOException
   */
private boolean rename(FTPClient client, Path src, Path dst) throws IOException {
    Path workDir = new Path(client.printWorkingDirectory());
    Path absoluteSrc = makeAbsolute(workDir, src);
    Path absoluteDst = makeAbsolute(workDir, dst);
    if (!exists(client, absoluteSrc)) {
        throw new IOException("Source path " + src + " does not exist");
    }
    if (exists(client, absoluteDst)) {
        throw new IOException("Destination path " + dst + " already exist, cannot rename!");
    }
    String parentSrc = absoluteSrc.getParent().toUri().toString();
    String parentDst = absoluteDst.getParent().toUri().toString();
    String from = src.getName();
    String to = dst.getName();
    if (!parentSrc.equals(parentDst)) {
        throw new IOException("Cannot rename parent(source): " + parentSrc + ", parent(destination):  " + parentDst);
    }
    client.changeWorkingDirectory(parentSrc);
    boolean renamed = client.rename(from, to);
    return renamed;
}

53. FileSystemContractBaseTest#testWorkingDirectory()

View license
public void testWorkingDirectory() throws Exception {
    Path workDir = path(getDefaultWorkingDirectory());
    assertEquals(workDir, fs.getWorkingDirectory());
    fs.setWorkingDirectory(path("."));
    assertEquals(workDir, fs.getWorkingDirectory());
    fs.setWorkingDirectory(path(".."));
    assertEquals(workDir.getParent(), fs.getWorkingDirectory());
    Path relativeDir = path("hadoop");
    fs.setWorkingDirectory(relativeDir);
    assertEquals(relativeDir, fs.getWorkingDirectory());
    Path absoluteDir = path("/test/hadoop");
    fs.setWorkingDirectory(absoluteDir);
    assertEquals(absoluteDir, fs.getWorkingDirectory());
}

54. FileSystemContractBaseTest#testMkdirs()

View license
public void testMkdirs() throws Exception {
    Path testDir = path("/test/hadoop");
    assertFalse(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));
    assertTrue(fs.mkdirs(testDir));
    assertTrue(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));
    assertTrue(fs.mkdirs(testDir));
    assertTrue(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));
    Path parentDir = testDir.getParent();
    assertTrue(fs.exists(parentDir));
    assertFalse(fs.isFile(parentDir));
    Path grandparentDir = parentDir.getParent();
    assertTrue(fs.exists(grandparentDir));
    assertFalse(fs.isFile(grandparentDir));
}

55. TestDecommissioningStatus#setUp()

View license
@BeforeClass
public static void setUp() throws Exception {
    conf = new Configuration();
    conf.setBoolean("dfs.replication.considerLoad", false);
    // Set up the hosts/exclude files.
    localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    dir = new Path(workingDir, "build/test/data/work-dir/decommission");
    assertTrue(localFileSys.mkdirs(dir));
    excludeFile = new Path(dir, "exclude");
    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
    conf.setInt("heartbeat.recheck.interval", 2000);
    conf.setInt("dfs.heartbeat.interval", 1);
    conf.setInt("dfs.replication.pending.timeout.sec", 4);
    conf.setInt("dfs.replication.interval", 1000);
    conf.setInt("dfs.namenode.decommission.interval", 1);
    writeConfigFile(localFileSys, excludeFile, null);
    cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
}

56. TestINodeDirectoryReplaceChild#testSetQuota()

View license
/**
   * Test to make sure that after setting a quota, all the parent-child relationships are wired up correctly
   */
@Test
public void testSetQuota() throws IOException {
    int consFileSpace = 2048;
    FSDirectory fsd = cluster.getNameNode().namesystem.dir;
    Path dir = new Path("/qdir1/qdir2/qdir3");
    assertTrue(dfs.mkdirs(dir));
    dir = new Path("/qdir1/qdir2/qdir4");
    assertTrue(dfs.mkdirs(dir));
    Path quotaDir = new Path("/qdir1/qdir2");
    dfs.setQuota(quotaDir, FSConstants.QUOTA_DONT_SET, 4 * consFileSpace);
    ContentSummary c = dfs.getContentSummary(quotaDir);
    assertEquals(c.getDirectoryCount(), 3);
    assertEquals(c.getSpaceQuota(), 4 * consFileSpace);
    INodeDirectory qdir2 = (INodeDirectory) fsd.getINode("/qdir1/qdir2");
    INode qdir3 = fsd.getINode("/qdir1/qdir2/qdir3");
    INode qdir4 = fsd.getINode("/qdir1/qdir2/qdir4");
    assertSame(qdir2, qdir3.parent);
    assertSame(qdir2.getChild("qdir3"), qdir3);
    assertSame(qdir2.getChild("qdir4"), qdir4);
}

57. TestDFSShell#createTree()

Project: hadoop-20
Source File: TestDFSShell.java
View license
static String createTree(FileSystem fs, String name) throws IOException {
    // create a tree
    //   ROOT
    //   |- f1
    //   |- f2
    //   + sub
    //      |- f3
    //      |- f4
    //   ROOT2
    //   |- f1
    String path = "/test/" + name;
    Path root = mkdir(fs, new Path(path));
    Path sub = mkdir(fs, new Path(root, "sub"));
    Path root2 = mkdir(fs, new Path(path + "2"));
    writeFile(fs, new Path(root, "f1"));
    writeFile(fs, new Path(root, "f2"));
    writeFile(fs, new Path(sub, "f3"));
    writeFile(fs, new Path(sub, "f4"));
    writeFile(fs, new Path(root2, "f1"));
    mkdir(fs, new Path(root2, "sub"));
    return path;
}

58. TestTaskCommit#testCommitFail()

Project: hadoop-20
Source File: TestTaskCommit.java
View license
public void testCommitFail() throws IOException {
    Path rootDir = new Path(System.getProperty("test.build.data", "/tmp"), "test");
    final Path inDir = new Path(rootDir, "input");
    final Path outDir = new Path(rootDir, "output");
    JobConf jobConf = createJobConf();
    jobConf.setMaxMapAttempts(1);
    jobConf.setOutputCommitter(CommitterWithCommitFail.class);
    RunningJob rJob = UtilsForTests.runJob(jobConf, inDir, outDir, 1, 0);
    rJob.waitForCompletion();
    assertEquals(JobStatus.FAILED, rJob.getJobState());
}

59. JobHistoryHelper#getJobInfoFromHdfsOutputDir()

Project: hadoop-book
Source File: JobHistoryHelper.java
View license
public static JobHistory.JobInfo getJobInfoFromHdfsOutputDir(String outputDir, Configuration conf) throws IOException {
    Path output = new Path(outputDir);
    Path historyLogDir = new Path(output, "_logs/history");
    FileSystem fs = output.getFileSystem(conf);
    if (!fs.exists(historyLogDir)) {
        throw new IOException("History directory " + historyLogDir.toString() + " does not exist");
    }
    Path[] jobFiles = FileUtil.stat2Paths(fs.listStatus(historyLogDir, jobLogFileFilter));
    if (jobFiles.length == 0) {
        throw new IOException("Not a valid history directory " + historyLogDir.toString());
    }
    String[] jobDetails = JobHistory.JobInfo.decodeJobHistoryFileName(jobFiles[0].getName()).split("_");
    String jobId = jobDetails[2] + "_" + jobDetails[3] + "_" + jobDetails[4];
    JobHistory.JobInfo job = new JobHistory.JobInfo(jobId);
    DefaultJobHistoryParser.parseJobTasks(jobFiles[0].toString(), job, fs);
    return job;
}
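
Note that the helper asks the Path itself for its FileSystem via output.getFileSystem(conf), so the same code works whether the output directory is on HDFS or the local filesystem. The lookup pattern in isolation (the _logs/history layout is the one the example assumes):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HistoryDirLookup {
    public static void listHistoryFiles(String outputDir, Configuration conf) throws IOException {
        Path output = new Path(outputDir);
        // The Path picks the FileSystem implementation from its scheme (hdfs://, file://, ...).
        FileSystem fs = output.getFileSystem(conf);
        Path historyLogDir = new Path(output, "_logs/history");
        if (!fs.exists(historyLogDir)) {
            throw new IOException("History directory " + historyLogDir + " does not exist");
        }
        for (FileStatus status : fs.listStatus(historyLogDir)) {
            System.out.println(status.getPath().getName());
        }
    }
}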

60. FTPFileSystem#rename()

Project: hadoop-common
Source File: FTPFileSystem.java
View license
/**
   * Convenience method, so that we don't open a new connection when using this
   * method from within another method. Otherwise every API invocation incurs
   * the overhead of opening/closing a TCP connection.
   * 
   * @param client
   * @param src
   * @param dst
   * @return
   * @throws IOException
   */
private boolean rename(FTPClient client, Path src, Path dst) throws IOException {
    Path workDir = new Path(client.printWorkingDirectory());
    Path absoluteSrc = makeAbsolute(workDir, src);
    Path absoluteDst = makeAbsolute(workDir, dst);
    if (!exists(client, absoluteSrc)) {
        throw new IOException("Source path " + src + " does not exist");
    }
    if (exists(client, absoluteDst)) {
        throw new IOException("Destination path " + dst + " already exist, cannot rename!");
    }
    String parentSrc = absoluteSrc.getParent().toUri().toString();
    String parentDst = absoluteDst.getParent().toUri().toString();
    String from = src.getName();
    String to = dst.getName();
    if (!parentSrc.equals(parentDst)) {
        throw new IOException("Cannot rename parent(source): " + parentSrc + ", parent(destination):  " + parentDst);
    }
    client.changeWorkingDirectory(parentSrc);
    boolean renamed = client.rename(from, to);
    return renamed;
}
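
The rename splits each absolute Path into its parent directory, via getParent().toUri(), and its final component, via getName(), and refuses to rename across directories. That decomposition can be tried on its own, without any FTP connection (paths are illustrative):

import org.apache.hadoop.fs.Path;

public class PathSplitDemo {
    public static void main(String[] args) {
        Path src = new Path("/data/in/part-00000");
        Path dst = new Path("/data/in/part-00000.bak");
        String parentSrc = src.getParent().toUri().toString();   // "/data/in"
        String parentDst = dst.getParent().toUri().toString();   // "/data/in"
        // The same pre-check the FTP rename performs before issuing the command.
        if (!parentSrc.equals(parentDst)) {
            throw new IllegalArgumentException("Cannot rename across directories");
        }
        System.out.println("rename " + src.getName() + " -> " + dst.getName());
    }
}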

61. FileSystemContractBaseTest#testWorkingDirectory()

View license
public void testWorkingDirectory() throws Exception {
    Path workDir = path(getDefaultWorkingDirectory());
    assertEquals(workDir, fs.getWorkingDirectory());
    fs.setWorkingDirectory(path("."));
    assertEquals(workDir, fs.getWorkingDirectory());
    fs.setWorkingDirectory(path(".."));
    assertEquals(workDir.getParent(), fs.getWorkingDirectory());
    Path relativeDir = path("hadoop");
    fs.setWorkingDirectory(relativeDir);
    assertEquals(relativeDir, fs.getWorkingDirectory());
    Path absoluteDir = path("/test/hadoop");
    fs.setWorkingDirectory(absoluteDir);
    assertEquals(absoluteDir, fs.getWorkingDirectory());
}
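
The contract test relies on relative Paths being resolved against the filesystem's working directory while absolute Paths are taken as-is. A short sketch of that resolution on the local filesystem (directory names are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirDemo {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        fs.setWorkingDirectory(new Path("/tmp/wd-demo"));
        // A relative Path is resolved against the working directory...
        System.out.println(fs.makeQualified(new Path("hadoop")));       // e.g. file:/tmp/wd-demo/hadoop
        // ...while an absolute Path ignores it.
        System.out.println(fs.makeQualified(new Path("/test/hadoop"))); // e.g. file:/test/hadoop
    }
}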

62. FileSystemContractBaseTest#testMkdirs()

View license
public void testMkdirs() throws Exception {
    Path testDir = path("/test/hadoop");
    assertFalse(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));
    assertTrue(fs.mkdirs(testDir));
    assertTrue(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));
    assertTrue(fs.mkdirs(testDir));
    assertTrue(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));
    Path parentDir = testDir.getParent();
    assertTrue(fs.exists(parentDir));
    assertFalse(fs.isFile(parentDir));
    Path grandparentDir = parentDir.getParent();
    assertTrue(fs.exists(grandparentDir));
    assertFalse(fs.isFile(grandparentDir));
}
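
mkdirs is required to create all missing ancestors and to succeed when the directory already exists, which is why the test can call it twice and then assert on getParent() and the grandparent. The two properties checked standalone (the path is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MkdirsDemo {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path testDir = new Path("/tmp/mkdirs-demo/test/hadoop");
        System.out.println(fs.mkdirs(testDir));    // true: the whole chain is created
        System.out.println(fs.mkdirs(testDir));    // still true: the call is idempotent
        System.out.println(fs.exists(testDir.getParent()));              // true
        System.out.println(fs.exists(testDir.getParent().getParent()));  // true
    }
}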

63. TestStickyBit#confirmCanAppend()

Project: hadoop-common
Source File: TestStickyBit.java
View license
/**
   * Ensure that even if a file is in a directory with the sticky bit on,
   * another user can write to that file (assuming correct permissions).
   */
private void confirmCanAppend(Configuration conf, FileSystem hdfs, Path baseDir) throws IOException {
    // Create a tmp directory with wide-open permissions and sticky bit
    Path p = new Path(baseDir, "tmp");
    hdfs.mkdirs(p);
    hdfs.setPermission(p, new FsPermission((short) 01777));
    // Write a file to the new tmp directory as a regular user
    hdfs = logonAs(user1, conf, hdfs);
    Path file = new Path(p, "foo");
    writeFile(hdfs, file);
    hdfs.setPermission(file, new FsPermission((short) 0777));
    // Log onto cluster as another user and attempt to append to file
    hdfs = logonAs(user2, conf, hdfs);
    Path file2 = new Path(p, "foo");
    FSDataOutputStream h = hdfs.append(file2);
    h.write("Some more data".getBytes());
    h.close();
}

64. TestDFSShell#createTree()

Project: hadoop-common
Source File: TestDFSShell.java
View license
static String createTree(FileSystem fs, String name) throws IOException {
    // create a tree
    //   ROOT
    //   |- f1
    //   |- f2
    //   + sub
    //      |- f3
    //      |- f4
    //   ROOT2
    //   |- f1
    String path = "/test/" + name;
    Path root = mkdir(fs, new Path(path));
    Path sub = mkdir(fs, new Path(root, "sub"));
    Path root2 = mkdir(fs, new Path(path + "2"));
    writeFile(fs, new Path(root, "f1"));
    writeFile(fs, new Path(root, "f2"));
    writeFile(fs, new Path(sub, "f3"));
    writeFile(fs, new Path(sub, "f4"));
    writeFile(fs, new Path(root2, "f1"));
    mkdir(fs, new Path(root2, "sub"));
    return path;
}

65. TestWrappedRecordReaderClassloader#testClassLoader()

View license
/**
   * Tests the class loader set by {@link JobConf#setClassLoader(ClassLoader)}
   * is inherited by any {@link WrappedRecordReader}s created by
   * {@link CompositeRecordReader}
   */
public void testClassLoader() throws Exception {
    JobConf job = new JobConf();
    Fake_ClassLoader classLoader = new Fake_ClassLoader();
    job.setClassLoader(classLoader);
    assertTrue(job.getClassLoader() instanceof Fake_ClassLoader);
    FileSystem fs = FileSystem.get(job);
    Path testdir = new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(fs);
    Path base = new Path(testdir, "/empty");
    Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") };
    job.set("mapred.join.expr", CompositeInputFormat.compose("outer", IF_ClassLoaderChecker.class, src));
    CompositeInputFormat<NullWritable> inputFormat = new CompositeInputFormat<NullWritable>();
    inputFormat.getRecordReader(inputFormat.getSplits(job, 1)[0], job, Reporter.NULL);
}
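
The test turns a possibly relative, scheme-less property value into a fully qualified Path with makeQualified(fs) before composing child paths under it. The qualification step on its own (the property name and fallback mirror the example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifyDemo {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path raw = new Path(System.getProperty("test.build.data", "/tmp"));
        // makeQualified adds the filesystem's scheme and resolves relative paths
        // against its working directory.
        Path qualified = raw.makeQualified(fs);
        System.out.println(raw + " -> " + qualified);   // e.g. /tmp -> file:/tmp
    }
}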

66. TestStickyBit#confirmCanAppend()

Project: hadoop-hdfs
Source File: TestStickyBit.java
View license
/**
   * Ensure that even if a file is in a directory with the sticky bit on,
   * another user can write to that file (assuming correct permissions).
   */
private void confirmCanAppend(Configuration conf, FileSystem hdfs, Path baseDir) throws IOException {
    // Create a tmp directory with wide-open permissions and sticky bit
    Path p = new Path(baseDir, "tmp");
    hdfs.mkdirs(p);
    hdfs.setPermission(p, new FsPermission((short) 01777));
    // Write a file to the new tmp directory as a regular user
    hdfs = logonAs(user1, conf, hdfs);
    Path file = new Path(p, "foo");
    writeFile(hdfs, file);
    hdfs.setPermission(file, new FsPermission((short) 0777));
    // Log onto cluster as another user and attempt to append to file
    hdfs = logonAs(user2, conf, hdfs);
    Path file2 = new Path(p, "foo");
    FSDataOutputStream h = hdfs.append(file2);
    h.write("Some more data".getBytes());
    h.close();
}

67. TestDFSShell#createTree()

Project: hadoop-hdfs
Source File: TestDFSShell.java
View license
static String createTree(FileSystem fs, String name) throws IOException {
    // create a tree
    //   ROOT
    //   |- f1
    //   |- f2
    //   + sub
    //      |- f3
    //      |- f4
    //   ROOT2
    //   |- f1
    String path = "/test/" + name;
    Path root = mkdir(fs, new Path(path));
    Path sub = mkdir(fs, new Path(root, "sub"));
    Path root2 = mkdir(fs, new Path(path + "2"));
    writeFile(fs, new Path(root, "f1"));
    writeFile(fs, new Path(root, "f2"));
    writeFile(fs, new Path(sub, "f3"));
    writeFile(fs, new Path(sub, "f4"));
    writeFile(fs, new Path(root2, "f1"));
    mkdir(fs, new Path(root2, "sub"));
    return path;
}

68. TestSimulatorEndToEnd#testMain()

View license
@Test
public void testMain() throws Exception {
    final Configuration conf = new Configuration();
    final FileSystem lfs = FileSystem.getLocal(conf);
    final Path rootInputDir = new Path(System.getProperty("src.test.data", "data")).makeQualified(lfs);
    final Path traceFile = new Path(rootInputDir, "19-jobs.trace.json.gz");
    final Path topologyFile = new Path(rootInputDir, "19-jobs.topology.json.gz");
    LOG.info("traceFile = " + traceFile.toString() + " topology = " + topologyFile.toString());
    int numJobs = getNumberJobs(traceFile, conf);
    int nTrackers = getNumberTaskTrackers(topologyFile, conf);
    MockSimulatorEngine mockMumak = new MockSimulatorEngine(numJobs, nTrackers);
    String[] args = { traceFile.toString(), topologyFile.toString() };
    int res = ToolRunner.run(new Configuration(), mockMumak, args);
    Assert.assertEquals(res, 0);
}

69. HiveImport#getHiveBinPath()

Project: hadoop-mapreduce
Source File: HiveImport.java
View license
/** 
   * @return the filename of the hive executable to run to do the import
   */
private String getHiveBinPath() {
    // If the user has $HIVE_HOME set, then use $HIVE_HOME/bin/hive if it
    // exists.
    // Fall back to just plain 'hive' and hope it's in the path.
    String hiveHome = options.getHiveHome();
    if (null == hiveHome) {
        return "hive";
    }
    Path p = new Path(hiveHome);
    p = new Path(p, "bin");
    p = new Path(p, "hive");
    String hiveBinStr = p.toString();
    if (new File(hiveBinStr).exists()) {
        return hiveBinStr;
    } else {
        return "hive";
    }
}
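
Here Path is used purely for portable string composition: $HIVE_HOME, bin, and hive are joined with the two-argument constructor and the result handed to java.io.File for the existence check. The chaining in isolation (the HIVE_HOME value is illustrative):

import java.io.File;
import org.apache.hadoop.fs.Path;

public class BinPathDemo {
    public static void main(String[] args) {
        String hiveHome = "/opt/hive";    // stand-in for options.getHiveHome()
        Path p = new Path(hiveHome);
        p = new Path(p, "bin");
        p = new Path(p, "hive");
        String hiveBin = p.toString();    // "/opt/hive/bin/hive"
        System.out.println(new File(hiveBin).exists() ? hiveBin : "hive");
    }
}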

70. TestWrappedRecordReaderClassloader#testClassLoader()

View license
/**
   * Tests the class loader set by {@link JobConf#setClassLoader(ClassLoader)}
   * is inherited by any {@link WrappedRecordReader}s created by
   * {@link CompositeRecordReader}
   */
public void testClassLoader() throws Exception {
    JobConf job = new JobConf();
    Fake_ClassLoader classLoader = new Fake_ClassLoader();
    job.setClassLoader(classLoader);
    assertTrue(job.getClassLoader() instanceof Fake_ClassLoader);
    FileSystem fs = FileSystem.get(job);
    Path testdir = new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(fs);
    Path base = new Path(testdir, "/empty");
    Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") };
    job.set("mapreduce.join.expr", CompositeInputFormat.compose("outer", IF_ClassLoaderChecker.class, src));
    CompositeInputFormat<NullWritable> inputFormat = new CompositeInputFormat<NullWritable>();
    inputFormat.getRecordReader(inputFormat.getSplits(job, 1)[0], job, Reporter.NULL);
}

71. TestTaskCommit#testCommitFail()

View license
public void testCommitFail() throws IOException {
    Path rootDir = new Path(System.getProperty("test.build.data", "/tmp"), "test");
    final Path inDir = new Path(rootDir, "./input");
    final Path outDir = new Path(rootDir, "./output");
    JobConf jobConf = createJobConf();
    jobConf.setMaxMapAttempts(1);
    jobConf.setOutputCommitter(CommitterWithCommitFail.class);
    RunningJob rJob = UtilsForTests.runJob(jobConf, inDir, outDir, 1, 0);
    rJob.waitForCompletion();
    assertEquals(JobStatus.FAILED, rJob.getJobState());
}

72. TestMultipleInputs#setUp()

View license
@Before
public void setUp() throws Exception {
    super.setUp();
    Path rootDir = getDir(ROOT_DIR);
    Path in1Dir = getDir(IN1_DIR);
    Path in2Dir = getDir(IN2_DIR);
    Configuration conf = createJobConf();
    FileSystem fs = FileSystem.get(conf);
    fs.delete(rootDir, true);
    if (!fs.mkdirs(in1Dir)) {
        throw new IOException("Mkdirs failed to create " + in1Dir.toString());
    }
    if (!fs.mkdirs(in2Dir)) {
        throw new IOException("Mkdirs failed to create " + in2Dir.toString());
    }
}

73. TestWrappedRRClassloader#testClassLoader()

View license
/**
   * Tests the class loader set by 
   * {@link Configuration#setClassLoader(ClassLoader)}
   * is inherited by any {@link WrappedRecordReader}s created by
   * {@link CompositeRecordReader}
   */
public void testClassLoader() throws Exception {
    Configuration conf = new Configuration();
    Fake_ClassLoader classLoader = new Fake_ClassLoader();
    conf.setClassLoader(classLoader);
    assertTrue(conf.getClassLoader() instanceof Fake_ClassLoader);
    FileSystem fs = FileSystem.get(conf);
    Path testdir = new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(fs);
    Path base = new Path(testdir, "/empty");
    Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") };
    conf.set(CompositeInputFormat.JOIN_EXPR, CompositeInputFormat.compose("outer", IF_ClassLoaderChecker.class, src));
    CompositeInputFormat<NullWritable> inputFormat = new CompositeInputFormat<NullWritable>();
    // create dummy TaskAttemptID
    TaskAttemptID tid = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
    conf.set(JobContext.TASK_ATTEMPT_ID, tid.toString());
    inputFormat.createRecordReader(inputFormat.getSplits(new Job(conf)).get(0), new TaskAttemptContextImpl(conf, tid));
}

74. NutchData#generate()

Project: HiBench
Source File: NutchData.java
View license
/*	
	private void test2LevelMapFile(Path furl) throws IOException {

		JobConf job = new JobConf();
		FileSystem fs = FileSystem.get(job);
		MapFile.Reader reader = new MapFile.Reader(fs, furl.toString(), job);
		Text value = new Text();
		reader.get(new LongWritable(1000), value);
		if (null != value) {
			log.info("---Find it: <1000, " + value + ">");
		}
	}
*/
public void generate() throws Exception {
    init();
    createNutchUrls();
    createNutchIndexData();
    Path ffetch = new Path(options.getResultPath(), CrawlDatum.FETCH_DIR_NAME);
    Path fparse = new Path(options.getResultPath(), CrawlDatum.PARSE_DIR_NAME);
    Path linkdb = new Path(segment, LINKDB_DIR_NAME);
    FileSystem fs = ffetch.getFileSystem(new Configuration());
    fs.rename(ffetch, new Path(segment, CrawlDatum.FETCH_DIR_NAME));
    fs.rename(fparse, new Path(segment, CrawlDatum.PARSE_DIR_NAME));
    fs.rename(linkdb, new Path(options.getResultPath(), LINKDB_DIR_NAME));
    fs.close();
    close();
}
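
The generator moves whole directories with FileSystem.rename, building both source and destination from Path(parent, child). A stripped-down sketch of that move (paths and the subdirectory name are illustrative; rename returns false rather than throwing when it cannot move, so the result is worth checking):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MoveDirDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path resultPath = new Path("/tmp/nutch-demo/result");
        Path segment = new Path("/tmp/nutch-demo/segment");
        FileSystem fs = resultPath.getFileSystem(conf);
        fs.mkdirs(new Path(resultPath, "crawl_fetch"));
        fs.mkdirs(segment);
        // Relocate result/crawl_fetch underneath the segment directory.
        boolean moved = fs.rename(new Path(resultPath, "crawl_fetch"),
                                  new Path(segment, "crawl_fetch"));
        System.out.println("moved=" + moved);
    }
}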

75. TestStore#init()

Project: hindex
Source File: TestStore.java
View license
private void init(String methodName, Configuration conf, HColumnDescriptor hcd) throws IOException {
    //Setting up a Store
    Path basedir = new Path(DIR + methodName);
    Path logdir = new Path(DIR + methodName + "/logs");
    Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
    FileSystem fs = FileSystem.get(conf);
    fs.delete(logdir, true);
    HTableDescriptor htd = new HTableDescriptor(table);
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
    HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
    HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
    store = new Store(basedir, region, hcd, fs, conf);
}

76. TestHLogSplit#testRecoveredEditsPathForMeta()

Project: hindex
Source File: TestHLogSplit.java
View license
/**
   * @throws IOException
   * @see https://issues.apache.org/jira/browse/HBASE-3020
   */
@Test
public void testRecoveredEditsPathForMeta() throws IOException {
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    byte[] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
    Path tdir = new Path(hbaseDir, Bytes.toString(HConstants.META_TABLE_NAME));
    Path regiondir = new Path(tdir, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
    fs.mkdirs(regiondir);
    long now = System.currentTimeMillis();
    HLog.Entry entry = new HLog.Entry(new HLogKey(encoded, HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), new WALEdit());
    Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, hbaseDir, true);
    String parentOfParent = p.getParent().getParent().getName();
    assertEquals(parentOfParent, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
}

77. TestCopyRecoveredEditsTask#testNoEditsDir()

View license
/**
   * Check that we don't get an exception if there is no recovered edits directory to copy
   * @throws Exception on failure
   */
@Test
public void testNoEditsDir() throws Exception {
    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
    ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
    FileSystem fs = UTIL.getTestFileSystem();
    Path root = UTIL.getDataTestDir();
    String regionName = "regionA";
    Path regionDir = new Path(root, regionName);
    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root);
    try {
        // doesn't really matter where the region's snapshot directory is, but this is pretty close
        Path snapshotRegionDir = new Path(workingDir, regionName);
        fs.mkdirs(snapshotRegionDir);
        Path regionEdits = HLog.getRegionDirRecoveredEditsDir(regionDir);
        assertFalse("Edits dir exists already - it shouldn't", fs.exists(regionEdits));
        CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir);
        task.call();
    } finally {
        // cleanup the working directory
        FSUtils.delete(fs, regionDir, true);
        FSUtils.delete(fs, workingDir, true);
    }
}

78. TestSnapshotDescriptionUtils#testCompleteSnapshotWithNoSnapshotDirectoryFailure()

View license
/**
   * Test that we throw an exception if there is no working snapshot directory when we attempt to
   * 'complete' the snapshot
   * @throws Exception on failure
   */
@Test
public void testCompleteSnapshotWithNoSnapshotDirectoryFailure() throws Exception {
    Path snapshotDir = new Path(root, HConstants.SNAPSHOT_DIR_NAME);
    Path tmpDir = new Path(snapshotDir, ".tmp");
    Path workingDir = new Path(tmpDir, "not_a_snapshot");
    assertFalse("Already have working snapshot dir: " + workingDir + " but shouldn't. Test file leak?", fs.exists(workingDir));
    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
    try {
        SnapshotDescriptionUtils.completeSnapshot(snapshot, root, workingDir, fs);
        fail("Shouldn't successfully complete move of a non-existent directory.");
    } catch (IOException e) {
        LOG.info("Correctly failed to move non-existant directory: " + e.getMessage());
    }
}

79. OfflineMetaRebuildTestCore#createRegion()

View license
protected HRegionInfo createRegion(Configuration conf, final HTable htbl, byte[] startKey, byte[] endKey) throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = htbl.getTableDescriptor();
    HRegionInfo hri = new HRegionInfo(htbl.getTableName(), startKey, endKey);
    LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName());
    fs.mkdirs(p);
    Path riPath = new Path(p, HRegion.REGIONINFO_FILE);
    FSDataOutputStream out = fs.create(riPath);
    hri.write(out);
    out.close();
    // add to meta.
    Put put = new Put(hri.getRegionName());
    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(hri));
    meta.put(put);
    meta.flushCommits();
    return hri;
}

80. TestHBaseFsck#getFlushedHFile()

Project: hindex
Source File: TestHBaseFsck.java
View license
/**
   * We don't have an easy way to verify that a flush completed, so we loop until we find a
   * legitimate hfile and return it.
   * @param fs
   * @param table
   * @return Path of a flushed hfile.
   * @throws IOException
   */
Path getFlushedHFile(FileSystem fs, String table) throws IOException {
    Path tableDir = FSUtils.getTablePath(FSUtils.getRootDir(conf), table);
    Path regionDir = FSUtils.getRegionDirs(fs, tableDir).get(0);
    Path famDir = new Path(regionDir, FAM_STR);
    // keep doing this until we get a legit hfile
    while (true) {
        FileStatus[] hfFss = fs.listStatus(famDir);
        if (hfFss.length == 0) {
            continue;
        }
        for (FileStatus hfs : hfFss) {
            if (!hfs.isDir()) {
                return hfs.getPath();
            }
        }
    }
}
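
The loop lists the column-family directory with FileSystem.listStatus and returns the first entry whose FileStatus is not a directory. The same filter, detached from the HBase flush logic (the busy-wait is dropped here):

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FirstFileFinder {
    /** Returns the first regular file directly under dir, or null if there is none. */
    static Path firstFile(FileSystem fs, Path dir) throws IOException {
        for (FileStatus status : fs.listStatus(dir)) {
            if (!status.isDir()) {     // deprecated, but matches the API level of the example
                return status.getPath();
            }
        }
        return null;
    }
}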

81. DemuxManager#moveFolder()

Project: HiTune
Source File: DemuxManager.java
View license
/**
     * Move sourceFolder inside destFolder
     * @param srcDir
     * @param destDir
     * @return
     * @throws IOException
     */
protected boolean moveFolder(String srcDir, String destDir, String prefix) throws IOException {
    if (!destDir.endsWith("/")) {
        destDir += "/";
    }
    Path pSrcDir = new Path(srcDir);
    Path pDestDir = new Path(destDir);
    setup(pDestDir);
    destDir += prefix + "_" + System.currentTimeMillis();
    Path pFinalDestDir = new Path(destDir);
    return fs.rename(pSrcDir, pFinalDestDir);
}

82. JobFilePartitioner#getTargetDirectory()

Project: hraven
Source File: JobFilePartitioner.java
View license
/**
   * @param hdfs
   *          FileSystem handle
   * @param outputPath
   *          base directory that files are to be written to
   * @param fileModTime
   *          modification time of the file that needs to be moved/copied to hdfs
   * @return the path in HDFS to write the file to. Will be created
   *         if it does not exist.
   * @throws IOException
   *           if the year/month/day directory cannot be created in
   *           outputPath.
   */
private Path getTargetDirectory(FileSystem hdfs, Path outputPath, long fileModTime) throws IOException {
    String year = YEAR_FORMAT.format(new Date(fileModTime));
    String month = MONTH_FORMAT.format(new Date(fileModTime));
    String day = DAY_FORMAT.format(new Date(fileModTime));
    Path yearDir = new Path(outputPath, year);
    Path monthDir = new Path(yearDir, month);
    Path dayDir = new Path(monthDir, day);
    // Create the day directory if it does not already exist.
    if (!hdfs.exists(dayDir)) {
        if (hdfs.mkdirs(dayDir)) {
            LOG.info("Created: " + dayDir.toString());
        } else {
            throw new IOException("Unable to create target directory with date: " + dayDir.getName());
        }
    }
    return dayDir;
}
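
Each date component becomes one Path level (year/month/day) and a single mkdirs call creates the whole chain. A compact version of the same idea; the date formats here are assumptions standing in for the example's YEAR_FORMAT, MONTH_FORMAT and DAY_FORMAT, which are not shown:

import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DatePartitionedDir {
    static Path targetDir(FileSystem fs, Path outputPath, long fileModTime) throws IOException {
        Date d = new Date(fileModTime);
        // Assumed yyyy/MM/dd layout.
        Path yearDir = new Path(outputPath, new SimpleDateFormat("yyyy").format(d));
        Path monthDir = new Path(yearDir, new SimpleDateFormat("MM").format(d));
        Path dayDir = new Path(monthDir, new SimpleDateFormat("dd").format(d));
        if (!fs.exists(dayDir) && !fs.mkdirs(dayDir)) {
            throw new IOException("Unable to create " + dayDir);
        }
        return dayDir;
    }
}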

83. TestFileLister#testGetJobIdFromPath()

Project: hraven
Source File: TestFileLister.java
View license
@Test
public void testGetJobIdFromPath() {
    String JOB_HISTORY_FILE_NAME = "src/test/resources/job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
    File jobHistoryfile = new File(JOB_HISTORY_FILE_NAME);
    Path srcPath = new Path(jobHistoryfile.toURI());
    String jobId = FileLister.getJobIdFromPath(srcPath);
    String expJobId = "job_1329348432655_0001";
    assertEquals(expJobId, jobId);
    String JOB_CONF_FILE_NAME = "src/test/resources/job_1329348432655_0001_conf.xml";
    File jobConfFile = new File(JOB_CONF_FILE_NAME);
    srcPath = new Path(jobConfFile.toURI());
    jobId = FileLister.getJobIdFromPath(srcPath);
    assertEquals(expJobId, jobId);
    jobConfFile = new File("job_201311192236_3583_1386370578196_user1_Sleep+job");
    srcPath = new Path(jobConfFile.toURI());
    jobId = FileLister.getJobIdFromPath(srcPath);
    expJobId = "job_201311192236_3583";
    assertEquals(expJobId, jobId);
}
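
Both assertions build the Path from a java.io.File through its URI, which produces a file:-scheme Path whose getName() is the plain file name that getJobIdFromPath then parses. The conversion by itself:

import java.io.File;
import org.apache.hadoop.fs.Path;

public class FileToPathDemo {
    public static void main(String[] args) {
        File local = new File("src/test/resources/job_1329348432655_0001_conf.xml");
        Path p = new Path(local.toURI());              // absolute file: URI
        System.out.println(p.toUri().getScheme());     // file
        System.out.println(p.getName());               // job_1329348432655_0001_conf.xml
    }
}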

84. HadoopIgfs20FileSystemAbstractSelfTest#testCreateBase()

View license
/** @throws Exception If failed. */
public void testCreateBase() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
    Path file = new Path(dir, "someFile");
    assertPathDoesNotExist(fs, file);
    FsPermission fsPerm = new FsPermission((short) 644);
    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(fsPerm));
    // Try to write something in file.
    os.write("abc".getBytes());
    os.close();
    // Check file status.
    FileStatus fileStatus = fs.getFileStatus(file);
    assertFalse(fileStatus.isDirectory());
    assertEquals(file, fileStatus.getPath());
    assertEquals(fsPerm, fileStatus.getPermission());
}

85. HadoopIgfs20FileSystemAbstractSelfTest#testDeleteFailsIfNonRecursive()

View license
/** @throws Exception If failed. */
public void testDeleteFailsIfNonRecursive() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    final Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
    GridTestUtils.assertThrows(log, new Callable<Object>() {

        @Override
        public Object call() throws Exception {
            fs.delete(someDir2, false);
            return null;
        }
    }, PathIsNotEmptyDirectoryException.class, null);
    assertPathExists(fs, someDir2);
    assertPathExists(fs, someDir3);
}

86. HadoopIgfs20FileSystemAbstractSelfTest#testDeleteRecursively()

View license
/** @throws Exception If failed. */
public void testDeleteRecursively() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
    assertTrue(fs.delete(someDir2, true));
    assertPathDoesNotExist(fs, someDir2);
    assertPathDoesNotExist(fs, someDir3);
}

87. HadoopIgfs20FileSystemAbstractSelfTest#testDeleteRecursivelyFromRoot()

View license
/** @throws Exception If failed. */
public void testDeleteRecursivelyFromRoot() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    Path root = new Path(fsHome, "/");
    assertFalse(fs.delete(root, true));
    assertTrue(fs.delete(new Path(fsHome, "/someDir1"), true));
    assertPathDoesNotExist(fs, someDir3);
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
    assertPathExists(fs, root);
}

88. HadoopIgfs20FileSystemAbstractSelfTest#testSetPermissionCheckNonRecursiveness()

View license
/** @throws Exception If failed. */
public void testSetPermissionCheckNonRecursiveness() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    Path tmpDir = new Path(fsHome, "/tmp");
    FsPermission perm = new FsPermission((short) 123);
    fs.setPermission(tmpDir, perm);
    assertEquals(perm, fs.getFileStatus(tmpDir).getPermission());
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
}

89. HadoopIgfs20FileSystemAbstractSelfTest#testSetOwnerCheckNonRecursiveness()

View license
/** @throws Exception If failed. */
public void testSetOwnerCheckNonRecursiveness() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    Path tmpDir = new Path(fsHome, "/tmp");
    fs.setOwner(file, "fUser", "fGroup");
    fs.setOwner(tmpDir, "dUser", "dGroup");
    assertEquals("dUser", fs.getFileStatus(tmpDir).getOwner());
    assertEquals("dGroup", fs.getFileStatus(tmpDir).getGroup());
    assertEquals("fUser", fs.getFileStatus(file).getOwner());
    assertEquals("fGroup", fs.getFileStatus(file).getGroup());
}

90. HadoopIgfs20FileSystemAbstractSelfTest#testAppendIfPathPointsToDirectory()

View license
/** @throws Exception If failed. */
public void testAppendIfPathPointsToDirectory() throws Exception {
    final Path fsHome = new Path(primaryFsUri);
    final Path dir = new Path(fsHome, "/tmp");
    Path file = new Path(dir, "my");
    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    GridTestUtils.assertThrowsInherited(log, new Callable<Object>() {

        @Override
        public Object call() throws Exception {
            return fs.create(new Path(fsHome, dir), EnumSet.of(CreateFlag.APPEND), Options.CreateOpts.perms(FsPermission.getDefault()));
        }
    }, IOException.class, null);
}

91. HadoopIgfs20FileSystemAbstractSelfTest#testRenameIfSrcPathDoesNotExist()

View license
/** @throws Exception If failed. */
public void testRenameIfSrcPathDoesNotExist() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    final Path srcFile = new Path(fsHome, "srcFile");
    final Path dstFile = new Path(fsHome, "dstFile");
    assertPathDoesNotExist(fs, srcFile);
    GridTestUtils.assertThrows(log, new Callable<Object>() {

        @Override
        public Object call() throws Exception {
            fs.rename(srcFile, dstFile);
            return null;
        }
    }, FileNotFoundException.class, null);
    assertPathDoesNotExist(fs, dstFile);
}

92. HadoopIgfs20FileSystemAbstractSelfTest#testRenameFile()

View license
/** @throws Exception If failed. */
public void testRenameFile() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path srcFile = new Path(fsHome, "/tmp/srcFile");
    Path dstFile = new Path(fsHome, "/tmp/dstFile");
    FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    fs.rename(srcFile, dstFile);
    assertPathDoesNotExist(fs, srcFile);
    assertPathExists(fs, dstFile);
}

93. HadoopIgfs20FileSystemAbstractSelfTest#testRenameDirectory()

View license
/** @throws Exception If failed. */
public void testRenameDirectory() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path dir = new Path(fsHome, "/tmp/");
    Path newDir = new Path(fsHome, "/tmpNew/");
    FSDataOutputStream os = fs.create(new Path(dir, "myFile"), EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    fs.rename(dir, newDir);
    assertPathDoesNotExist(fs, dir);
    assertPathExists(fs, newDir);
}

94. HadoopIgfs20FileSystemAbstractSelfTest#testMkdirs()

View license
/** @throws Exception If failed. */
@SuppressWarnings("OctalInteger")
public void testMkdirs() throws Exception {
    Path fsHome = new Path(primaryFileSystemUriPath());
    Path dir = new Path(fsHome, "/tmp/staging");
    Path nestedDir = new Path(dir, "nested");
    FsPermission dirPerm = FsPermission.createImmutable((short) 0700);
    FsPermission nestedDirPerm = FsPermission.createImmutable((short) 111);
    fs.mkdir(dir, dirPerm, true);
    fs.mkdir(nestedDir, nestedDirPerm, true);
    assertEquals(dirPerm, fs.getFileStatus(dir).getPermission());
    assertEquals(nestedDirPerm, fs.getFileStatus(nestedDir).getPermission());
    assertEquals(getClientFsUser(), fs.getFileStatus(dir).getOwner());
    assertEquals(getClientFsUser(), fs.getFileStatus(nestedDir).getOwner());
}
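
One detail worth calling out: FsPermission takes the raw short value, so the octal literal 0700 and the decimal literal 111 encode quite different permission bits, which is what the test's @SuppressWarnings("OctalInteger") is hinting at. A quick comparison (the output comments are what FsPermission.toString() is expected to print):

import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionLiteralDemo {
    public static void main(String[] args) {
        // Octal 0700: read/write/execute for the owner only.
        System.out.println(FsPermission.createImmutable((short) 0700));  // rwx------
        // Decimal 111 is octal 0157, a very different bit pattern.
        System.out.println(FsPermission.createImmutable((short) 111));   // --xr-xrwx
    }
}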

95. IgniteHadoopFileSystemAbstractSelfTest#testCreateBase()

View license
/** @throws Exception If failed. */
@SuppressWarnings("deprecation")
public void testCreateBase() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
    Path file = new Path(dir, "someFile");
    assertPathDoesNotExist(fs, file);
    FsPermission fsPerm = new FsPermission((short) 644);
    FSDataOutputStream os = fs.create(file, fsPerm, false, 1, (short) 1, 1L, null);
    // Try to write something in file.
    os.write("abc".getBytes());
    os.close();
    // Check file status.
    FileStatus fileStatus = fs.getFileStatus(file);
    assertFalse(fileStatus.isDir());
    assertEquals(file, fileStatus.getPath());
    assertEquals(fsPerm, fileStatus.getPermission());
}

96. IgniteHadoopFileSystemAbstractSelfTest#testDeleteFailsIfNonRecursive()

View license
/** @throws Exception If failed. */
public void testDeleteFailsIfNonRecursive() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
    fs.create(someDir3).close();
    Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
    assertFalse(fs.delete(someDir2, false));
    assertPathExists(fs, someDir2);
    assertPathExists(fs, someDir3);
}

97. IgniteHadoopFileSystemAbstractSelfTest#testDeleteRecursively()

View license
/** @throws Exception If failed. */
public void testDeleteRecursively() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
    FSDataOutputStream os = fs.create(someDir3);
    os.close();
    Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
    assertTrue(fs.delete(someDir2, true));
    assertPathDoesNotExist(fs, someDir2);
    assertPathDoesNotExist(fs, someDir3);
}

98. IgniteHadoopFileSystemAbstractSelfTest#testDeleteRecursivelyFromRoot()

View license
/** @throws Exception If failed. */
public void testDeleteRecursivelyFromRoot() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
    FSDataOutputStream os = fs.create(someDir3);
    os.close();
    Path root = new Path(fsHome, "/");
    assertFalse(fs.delete(root, true));
    assertTrue(fs.delete(new Path("/someDir1"), true));
    assertPathDoesNotExist(fs, someDir3);
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
    assertPathExists(fs, root);
}

99. IgniteHadoopFileSystemAbstractSelfTest#testSetPermissionCheckNonRecursiveness()

View license
/** @throws Exception If failed. */
@SuppressWarnings("deprecation")
public void testSetPermissionCheckNonRecursiveness() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file, FsPermission.getDefault(), false, 64 * 1024, fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
    os.close();
    Path tmpDir = new Path(fsHome, "/tmp");
    FsPermission perm = new FsPermission((short) 123);
    fs.setPermission(tmpDir, perm);
    assertEquals(perm, fs.getFileStatus(tmpDir).getPermission());
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
}

100. IgniteHadoopFileSystemAbstractSelfTest#testSetOwnerCheckNonRecursiveness()

View license
/** @throws Exception If failed. */
public void testSetOwnerCheckNonRecursiveness() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file);
    os.close();
    Path tmpDir = new Path(fsHome, "/tmp");
    fs.setOwner(file, "fUser", "fGroup");
    fs.setOwner(tmpDir, "dUser", "dGroup");
    assertEquals("dUser", fs.getFileStatus(tmpDir).getOwner());
    assertEquals("dGroup", fs.getFileStatus(tmpDir).getGroup());
    assertEquals("fUser", fs.getFileStatus(file).getOwner());
    assertEquals("fGroup", fs.getFileStatus(file).getGroup());
}