Here are examples of the Java API class org.apache.hadoop.fs.Path, taken from open source projects.
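Before the examples, here is a minimal standalone sketch (not taken from any of the projects below; the directory and file names are placeholders) showing the core Path operations most of these snippets rely on: building a child path from a parent, walking back up with getParent(), and reading the final component with getName().

import org.apache.hadoop.fs.Path;

public class PathBasics {
  public static void main(String[] args) {
    // Build a child path relative to a parent path.
    Path table = new Path("/hbase/data/mytable");          // placeholder directory
    Path region = new Path(table, "region-0001");          // placeholder region name
    Path hfile = new Path(region, "cf/abcdef0123456789");  // placeholder family/file

    // Decompose the path again.
    System.out.println(hfile.getName());                // abcdef0123456789
    System.out.println(hfile.getParent().getName());    // cf
    System.out.println(hfile.getParent().getParent());  // /hbase/data/mytable/region-0001
    System.out.println(hfile.toUri().getPath());        // full slash-separated path
  }
}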
1. HFileCorruptionChecker#createQuarantinePath()
/**
 * Given a path, generates a new path to where we move a corrupted hfile (bad
 * trailer, no trailer).
 *
 * @param hFile
 *          Path to a corrupt hfile (assumes that it is HBASE_DIR/table/region/cf/file)
 * @return path to where corrupted files are stored. This should be
 *         HBASE_DIR/.corrupt/table/region/cf/file.
 */
Path createQuarantinePath(Path hFile) {
  // extract the normal dirs structure
  Path cfDir = hFile.getParent();
  Path regionDir = cfDir.getParent();
  Path tableDir = regionDir.getParent();
  // build up the corrupted dirs structure
  Path corruptBaseDir = new Path(conf.get(HConstants.HBASE_DIR),
      conf.get("hbase.hfile.quarantine.dir", HConstants.CORRUPT_DIR_NAME));
  Path corruptTableDir = new Path(corruptBaseDir, tableDir.getName());
  Path corruptRegionDir = new Path(corruptTableDir, regionDir.getName());
  Path corruptFamilyDir = new Path(corruptRegionDir, cfDir.getName());
  Path corruptHfile = new Path(corruptFamilyDir, hFile.getName());
  return corruptHfile;
}
2. TestHarFileSystem#setUp()
@BeforeClass
public static void setUp() throws Exception {
  dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null);
  fs = dfscluster.getFileSystem();
  mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
  inputPath = new Path(fs.getHomeDirectory(), "test");
  inputrelPath = new Path(fs.getHomeDirectory().toUri().getPath().substring(1), "test");
  filea = new Path(inputPath, "a");
  fileb = new Path(inputPath, "b");
  filec = new Path(inputPath, "c c");
  filed = new Path(inputPath, "d%d");
  // check for a har containing escape-worthy characters in their names
  archivePath = new Path(fs.getHomeDirectory(), "tmp");
  fs.mkdirs(inputPath);
  CopyFilesBase.createFileWithContent(fs, filea, "a".getBytes());
  CopyFilesBase.createFileWithContent(fs, fileb, "b".getBytes());
  CopyFilesBase.createFileWithContent(fs, filec, "c".getBytes());
  CopyFilesBase.createFileWithContent(fs, filed, "d".getBytes());
}
3. TestHarFileSystem#setUp()
protected void setUp() throws Exception {
  super.setUp();
  dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null);
  fs = dfscluster.getFileSystem();
  mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
  inputPath = new Path(fs.getHomeDirectory(), "test");
  inputrelPath = new Path(fs.getHomeDirectory().toUri().getPath().substring(1), "test");
  filea = new Path(inputPath, "a");
  fileb = new Path(inputPath, "b");
  filec = new Path(inputPath, "c");
  archivePath = new Path(fs.getHomeDirectory(), "tmp");
  fs.mkdirs(inputPath);
  FSDataOutputStream out = fs.create(filea);
  out.write("a".getBytes());
  out.close();
  out = fs.create(fileb);
  out.write("b".getBytes());
  out.close();
  out = fs.create(filec);
  out.write("c".getBytes());
  out.close();
}
4. HFileLink#getHFileFromBackReference()
/**
 * Get the full path of the HFile referenced by the back reference
 *
 * @param rootDir root hbase directory
 * @param linkRefPath Link Back Reference path
 * @return full path of the referenced hfile
 * @throws IOException on unexpected error.
 */
public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) {
  int separatorIndex = linkRefPath.getName().indexOf('.');
  String linkRegionName = linkRefPath.getName().substring(0, separatorIndex);
  String linkTableName = linkRefPath.getName().substring(separatorIndex + 1);
  String hfileName = getBackReferenceFileName(linkRefPath.getParent());
  Path familyPath = linkRefPath.getParent().getParent();
  Path regionPath = familyPath.getParent();
  Path tablePath = regionPath.getParent();
  String linkName = createHFileLinkName(tablePath.getName(), regionPath.getName(), hfileName);
  Path linkTableDir = FSUtils.getTablePath(rootDir, linkTableName);
  Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName);
  return new Path(new Path(regionDir, familyPath.getName()), linkName);
}
5. TestFileSystemMetadataProvider#testCreateMetadataFiles()
@Test
public void testCreateMetadataFiles() throws IOException {
  ensureCreated();
  Path namedDirectory = new Path(testDirectory, NAME);
  Path metadataDirectory = new Path(namedDirectory, ".metadata");
  Path propertiesFile = new Path(metadataDirectory, "descriptor.properties");
  Path schemaFile = new Path(metadataDirectory, "schema.avsc");
  Assert.assertTrue("Named directory should exist for name:" + NAME, fileSystem.exists(namedDirectory));
  Assert.assertTrue("Metadata directory should exist", fileSystem.exists(metadataDirectory));
  Assert.assertTrue("Descriptor properties file should exist", fileSystem.exists(propertiesFile));
  Assert.assertTrue("Descriptor schema file should exist", fileSystem.exists(schemaFile));
}
6. TestFileSystemMetadataProvider#testDeleteRemovesMetadataFiles()
@Test
public void testDeleteRemovesMetadataFiles() throws IOException {
  testCreateMetadataFiles();
  DatasetDescriptor loaded = provider.load(NAME);
  Path namedDirectory = new Path(loaded.getLocation());
  Path metadataDirectory = new Path(namedDirectory, ".metadata");
  Path propertiesFile = new Path(metadataDirectory, "descriptor.properties");
  Path schemaFile = new Path(metadataDirectory, "schema.avsc");
  boolean result = provider.delete(NAME);
  Assert.assertTrue(result);
  Assert.assertFalse("Descriptor properties file should not exist", fileSystem.exists(propertiesFile));
  Assert.assertFalse("Descriptor schema file should not exist", fileSystem.exists(schemaFile));
  Assert.assertFalse("Metadata directory should not exist", fileSystem.exists(metadataDirectory));
  Assert.assertTrue("Named directory should still exist for name:" + NAME, fileSystem.exists(namedDirectory));
}
7. TestImpersonationQueries#createRecordReadersData()
private static void createRecordReadersData(String user, String group) throws Exception {
  // copy sequence file
  updateClient(user);
  Path localFile = new Path(FileUtils.getResourceAsFile("/sequencefiles/simple.seq").toURI().toString());
  Path dfsFile = new Path(getUserHome(user), "simple.seq");
  fs.copyFromLocalFile(localFile, dfsFile);
  fs.setOwner(dfsFile, user, group);
  fs.setPermission(dfsFile, new FsPermission((short) 0700));
  localFile = new Path(AvroTestUtil.generateSimplePrimitiveSchema_NoNullValues().getFilePath());
  dfsFile = new Path(getUserHome(user), "simple.avro");
  fs.copyFromLocalFile(localFile, dfsFile);
  fs.setOwner(dfsFile, user, group);
  fs.setPermission(dfsFile, new FsPermission((short) 0700));
}
8. FilteredCopyListingTest#testRunNoPattern()
@Test
public void testRunNoPattern() throws Exception {
  final URI uri = FileSystem.getLocal(new Configuration()).getUri();
  final String pathString = uri.toString();
  Path fileSystemPath = new Path(pathString);
  Path source = new Path(fileSystemPath.toString() + "///tmp/source");
  Path target = new Path(fileSystemPath.toString() + "///tmp/target");
  Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
  DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
  options.setSyncFolder(true);
  new FilteredCopyListing(new Configuration(), CREDENTIALS).buildListing(listingPath, options);
  verifyContents(listingPath, -1);
}
9. FilteredCopyListingTest#testRunStarPattern()
@Test
public void testRunStarPattern() throws Exception {
  final URI uri = FileSystem.getLocal(new Configuration()).getUri();
  final String pathString = uri.toString();
  Path fileSystemPath = new Path(pathString);
  Path source = new Path(fileSystemPath.toString() + "///tmp/source");
  Path target = new Path(fileSystemPath.toString() + "///tmp/target");
  Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
  DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
  options.setSyncFolder(true);
  Configuration configuration = new Configuration();
  configuration.set("falcon.include.path", "*/3/*");
  new FilteredCopyListing(configuration, CREDENTIALS).buildListing(listingPath, options);
  verifyContents(listingPath, 3);
}
10. FilteredCopyListingTest#testRunQuestionPattern()
@Test
public void testRunQuestionPattern() throws Exception {
  final URI uri = FileSystem.getLocal(new Configuration()).getUri();
  final String pathString = uri.toString();
  Path fileSystemPath = new Path(pathString);
  Path source = new Path(fileSystemPath.toString() + "///tmp/source");
  Path target = new Path(fileSystemPath.toString() + "///tmp/target");
  Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
  DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
  options.setSyncFolder(true);
  Configuration configuration = new Configuration();
  configuration.set("falcon.include.path", "*/3/?");
  new FilteredCopyListing(configuration, CREDENTIALS).buildListing(listingPath, options);
  verifyContents(listingPath, 2);
}
11. FilteredCopyListingTest#testRunRangePattern()
@Test
public void testRunRangePattern() throws Exception {
  final URI uri = FileSystem.getLocal(new Configuration()).getUri();
  final String pathString = uri.toString();
  Path fileSystemPath = new Path(pathString);
  Path source = new Path(fileSystemPath.toString() + "///tmp/source");
  Path target = new Path(fileSystemPath.toString() + "///tmp/target");
  Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
  DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
  options.setSyncFolder(true);
  Configuration configuration = new Configuration();
  configuration.set("falcon.include.path", "*/3/[47]");
  new FilteredCopyListing(configuration, CREDENTIALS).buildListing(listingPath, options);
  verifyContents(listingPath, 2);
}
12. FilteredCopyListingTest#testRunSpecificPattern()
@Test
public void testRunSpecificPattern() throws Exception {
  final URI uri = FileSystem.getLocal(new Configuration()).getUri();
  final String pathString = uri.toString();
  Path fileSystemPath = new Path(pathString);
  Path source = new Path(fileSystemPath.toString() + "///tmp/source");
  Path target = new Path(fileSystemPath.toString() + "///tmp/target");
  Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
  DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
  options.setSyncFolder(true);
  Configuration configuration = new Configuration();
  configuration.set("falcon.include.path", "*/3/40");
  new FilteredCopyListing(configuration, CREDENTIALS).buildListing(listingPath, options);
  verifyContents(listingPath, 1);
}
13. FilteredCopyListingTest#testRunListPattern()
@Test
public void testRunListPattern() throws Exception {
  final URI uri = FileSystem.getLocal(new Configuration()).getUri();
  final String pathString = uri.toString();
  Path fileSystemPath = new Path(pathString);
  Path source = new Path(fileSystemPath.toString() + "///tmp/source");
  Path target = new Path(fileSystemPath.toString() + "///tmp/target");
  Path listingPath = new Path(fileSystemPath.toString() + "///tmp/META/fileList.seq");
  DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
  options.setSyncFolder(true);
  Configuration configuration = new Configuration();
  configuration.set("falcon.include.path", "*/3/{4,7}");
  new FilteredCopyListing(configuration, CREDENTIALS).buildListing(listingPath, options);
  verifyContents(listingPath, 2);
}
14. SimpleDatasetsFinder#findDistinctDatasets()
/**
 * Create a dataset using {@link #inputDir} and {@link #destDir}.
 * Set dataset input path to be {@link #destDir} if {@link #recompactDatasets} is true.
 */
@Override
public Set<Dataset> findDistinctDatasets() throws IOException {
  Set<Dataset> datasets = Sets.newHashSet();
  Path inputPath = new Path(this.inputDir);
  Path inputLatePath = new Path(inputPath, MRCompactor.COMPACTION_LATE_DIR_SUFFIX);
  Path outputPath = new Path(this.destDir);
  Path outputLatePath = new Path(outputPath, MRCompactor.COMPACTION_LATE_DIR_SUFFIX);
  Dataset dataset = new Dataset.Builder()
      .withPriority(this.getDatasetPriority(inputPath.getName()))
      .withLateDataThresholdForRecompact(this.getDatasetRecompactThreshold(inputPath.getName()))
      .withInputPath(this.recompactDatasets ? outputPath : inputPath)
      .withInputLatePath(this.recompactDatasets ? outputLatePath : inputLatePath)
      .withOutputPath(outputPath)
      .withOutputLatePath(outputLatePath)
      .withOutputTmpPath(new Path(this.tmpOutputDir))
      .build();
  datasets.add(dataset);
  return datasets;
}
15. FsRenameCommitStepTest#setUp()
@BeforeClass
public void setUp() throws IOException {
  this.fs = FileSystem.getLocal(new Configuration());
  this.fs.delete(new Path(ROOT_DIR), true);
  Path dir1 = new Path(ROOT_DIR, "dir1");
  Path dir2 = new Path(ROOT_DIR, "dir2");
  this.fs.mkdirs(dir1);
  this.fs.mkdirs(dir2);
  Path src = new Path(dir1, "file");
  Path dst = new Path(dir2, "file");
  this.fs.createNewFile(src);
  this.step = (FsRenameCommitStep) new FsRenameCommitStep.Builder<>()
      .from(src).to(dst).withProps(new State()).build();
}
16. TestHdfsSpout#testSimpleText_noACK()
@Test
public void testSimpleText_noACK() throws IOException {
  Path file1 = new Path(source.toString() + "/file1.txt");
  createTextFile(file1, 5);
  Path file2 = new Path(source.toString() + "/file2.txt");
  createTextFile(file2, 5);
  Map conf = getDefaultConfig();
  conf.put(Configs.COMMIT_FREQ_COUNT, "1");
  conf.put(Configs.COMMIT_FREQ_SEC, "1");
  HdfsSpout spout = makeSpout(0, conf, Configs.TEXT, TextFileReader.defaultFields);
  runSpout(spout, "r11");
  Path arc1 = new Path(archive.toString() + "/file1.txt");
  Path arc2 = new Path(archive.toString() + "/file2.txt");
  checkCollectorOutput_txt((MockCollector) spout.getCollector(), arc1, arc2);
}
17. TestHdfsSpout#testSimpleText_ACK()
@Test
public void testSimpleText_ACK() throws IOException {
  Path file1 = new Path(source.toString() + "/file1.txt");
  createTextFile(file1, 5);
  Path file2 = new Path(source.toString() + "/file2.txt");
  createTextFile(file2, 5);
  Map conf = getDefaultConfig();
  conf.put(Configs.COMMIT_FREQ_COUNT, "1");
  conf.put(Configs.COMMIT_FREQ_SEC, "1");
  // enable acking
  conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, "1");
  HdfsSpout spout = makeSpout(0, conf, Configs.TEXT, TextFileReader.defaultFields);
  // consume file 1
  runSpout(spout, "r6", "a0", "a1", "a2", "a3", "a4");
  Path arc1 = new Path(archive.toString() + "/file1.txt");
  checkCollectorOutput_txt((MockCollector) spout.getCollector(), arc1);
  // consume file 2
  runSpout(spout, "r6", "a5", "a6", "a7", "a8", "a9");
  Path arc2 = new Path(archive.toString() + "/file2.txt");
  checkCollectorOutput_txt((MockCollector) spout.getCollector(), arc1, arc2);
}
18. TestTezCommonUtils#testTezDAGRecoveryStagingPath()
// Testing DAG specific recovery path staging dir
@Test(timeout = 5000)
public void testTezDAGRecoveryStagingPath() throws Exception {
  String strAppId = "testAppId";
  Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
  Path recoveryPath = TezCommonUtils.getRecoveryPath(stageDir, conf);
  Path recoveryStageDir = TezCommonUtils.getAttemptRecoveryPath(recoveryPath, 2);
  Path dagRecoveryPathj = TezCommonUtils.getDAGRecoveryPath(recoveryStageDir, "dag_123");
  String expectedDir = RESOLVED_STAGE_DIR + Path.SEPARATOR + TezCommonUtils.TEZ_SYSTEM_SUB_DIR
      + Path.SEPARATOR + strAppId + Path.SEPARATOR + TezConstants.DAG_RECOVERY_DATA_DIR_NAME
      + Path.SEPARATOR + "2" + Path.SEPARATOR + "dag_123"
      + TezConstants.DAG_RECOVERY_RECOVER_FILE_SUFFIX;
  Assert.assertEquals(expectedDir, dagRecoveryPathj.toString());
}
19. TestTezCommonUtils#testTezSummaryRecoveryStagingPath()
// Testing Summary recovery path staging dir
@Test(timeout = 5000)
public void testTezSummaryRecoveryStagingPath() throws Exception {
  String strAppId = "testAppId";
  Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
  Path recoveryPath = TezCommonUtils.getRecoveryPath(stageDir, conf);
  Path recoveryStageDir = TezCommonUtils.getAttemptRecoveryPath(recoveryPath, 2);
  Path summaryRecoveryPathj = TezCommonUtils.getSummaryRecoveryPath(recoveryStageDir);
  String expectedDir = RESOLVED_STAGE_DIR + Path.SEPARATOR + TezCommonUtils.TEZ_SYSTEM_SUB_DIR
      + Path.SEPARATOR + strAppId + Path.SEPARATOR + TezConstants.DAG_RECOVERY_DATA_DIR_NAME
      + Path.SEPARATOR + "2" + Path.SEPARATOR + TezConstants.DAG_RECOVERY_SUMMARY_FILE_SUFFIX;
  Assert.assertEquals(expectedDir, summaryRecoveryPathj.toString());
}
20. IndexLoadIncrementalHFile#splitStoreFile()
protected List<LoadQueueItem> splitStoreFile(final LoadQueueItem item, final HTable table,
    byte[] startKey, byte[] splitKey) throws IOException {
  final Path hfilePath = item.hfilePath;
  // We use a '_' prefix which is ignored when walking directory trees above.
  final Path tmpDir = new Path(item.hfilePath.getParent(), "_tmp");
  LOG.info("HFile at " + hfilePath + " no longer fits inside a single " + "region. Splitting...");
  String uniqueName = getUniqueName(table.getTableName());
  HColumnDescriptor familyDesc = table.getTableDescriptor().getFamily(item.family);
  Path botOut = new Path(tmpDir, uniqueName + ".bottom");
  Path topOut = new Path(tmpDir, uniqueName + ".top");
  splitStoreFile(getConf(), hfilePath, familyDesc, splitKey, botOut, topOut);
  // Add these back at the *front* of the queue, so there's a lower
  // chance that the region will just split again before we get there.
  List<LoadQueueItem> lqis = new ArrayList<LoadQueueItem>(2);
  lqis.add(new LoadQueueItem(item.family, botOut));
  lqis.add(new LoadQueueItem(item.family, topOut));
  LOG.info("Successfully split into new HFiles " + botOut + " and " + topOut);
  return lqis;
}
21. MultipleOutputFormat#getInputFileBasedOutputFileName()
/**
 * Generate the output file name based on a given name and the input file name. If
 * {@link JobContext#MAP_INPUT_FILE} does not exist (i.e. this is not for a map-only job),
 * the given name is returned unchanged. If the config value for
 * "num.of.trailing.legs.to.use" is not set, or is set to 0 or negative, the given
 * name is returned unchanged. Otherwise, return a file name consisting of the
 * N trailing legs of the input file name, where N is the config value for
 * "num.of.trailing.legs.to.use".
 *
 * @param job the job config
 * @param name the output file name
 * @return the output file name based on the given name and the input file name.
 */
protected String getInputFileBasedOutputFileName(JobConf job, String name) {
  String infilepath = job.get(JobContext.MAP_INPUT_FILE);
  if (infilepath == null) {
    // then return the given name
    return name;
  }
  int numOfTrailingLegsToUse = job.getInt("mapred.outputformat.numOfTrailingLegs", 0);
  if (numOfTrailingLegsToUse <= 0) {
    return name;
  }
  Path infile = new Path(infilepath);
  Path parent = infile.getParent();
  String midName = infile.getName();
  Path outPath = new Path(midName);
  for (int i = 1; i < numOfTrailingLegsToUse; i++) {
    if (parent == null)
      break;
    midName = parent.getName();
    if (midName.length() == 0)
      break;
    parent = parent.getParent();
    outPath = new Path(midName, outPath);
  }
  return outPath.toString();
}
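For orientation only, here is a small hypothetical walk-through of the trailing-legs loop above; the input path and leg count are invented, not taken from the original project. With an input file of /data/2014/01/part-00000 and two trailing legs, the loop keeps the file name and prepends one parent directory.

import org.apache.hadoop.fs.Path;

public class TrailingLegsDemo {
  public static void main(String[] args) {
    // Hypothetical input file; in the method above this comes from the job config.
    Path infile = new Path("/data/2014/01/part-00000");
    int numOfTrailingLegsToUse = 2;

    Path parent = infile.getParent();
    String midName = infile.getName();
    Path outPath = new Path(midName);                 // "part-00000"
    for (int i = 1; i < numOfTrailingLegsToUse; i++) {
      if (parent == null) break;
      midName = parent.getName();
      if (midName.length() == 0) break;
      parent = parent.getParent();
      outPath = new Path(midName, outPath);           // prepend the next leg: "01/part-00000"
    }
    System.out.println(outPath.toString());           // prints 01/part-00000
  }
}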
22. TestLoadIncrementalHFiles#testSplitStoreFile()
@Test
public void testSplitStoreFile() throws IOException {
  Path dir = util.getDataTestDir("testSplitHFile");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  createHFile(util.getConfiguration(), fs, testIn, FAMILY, QUALIFIER,
      Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);
  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");
  LoadIncrementalHFiles.splitStoreFile(util.getConfiguration(), testIn, familyDesc,
      Bytes.toBytes("ggg"), bottomOut, topOut);
  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
23. TestCatalogJanitor#createReferences()
/**
 * @param services Master services instance.
 * @param htd
 * @param parent
 * @param daughter
 * @param midkey
 * @param top True if we are to write a 'top' reference.
 * @return Path to reference we created.
 * @throws IOException
 */
private Path createReferences(final MasterServices services, final HTableDescriptor htd,
    final HRegionInfo parent, final HRegionInfo daughter, final byte[] midkey, final boolean top)
    throws IOException {
  Path rootdir = services.getMasterFileSystem().getRootDir();
  Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
  Path storedir = Store.getStoreHomedir(tabledir, daughter.getEncodedName(),
      htd.getColumnFamilies()[0].getName());
  Reference ref = new Reference(midkey, top ? Reference.Range.top : Reference.Range.bottom);
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  ref.write(fs, p);
  return p;
}
24. TestFSTableDescriptors#testSequenceidAdvancesOnTableInfo()
@Test
public void testSequenceidAdvancesOnTableInfo() throws IOException {
  Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
  HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo");
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
  int i0 = FSTableDescriptors.getTableInfoSequenceid(p0);
  Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
  // Assert we cleaned up the old file.
  assertTrue(!fs.exists(p0));
  int i1 = FSTableDescriptors.getTableInfoSequenceid(p1);
  assertTrue(i1 == i0 + 1);
  Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
  // Assert we cleaned up the old file.
  assertTrue(!fs.exists(p1));
  int i2 = FSTableDescriptors.getTableInfoSequenceid(p2);
  assertTrue(i2 == i1 + 1);
}
25. TestFSTableDescriptors#testFormatTableInfoSequenceId()
@Test
public void testFormatTableInfoSequenceId() {
  Path p0 = assertWriteAndReadSequenceid(0);
  // Assert p0 has format we expect.
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
    sb.append("0");
  }
  assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(), p0.getName());
  // Check a few more.
  Path p2 = assertWriteAndReadSequenceid(2);
  Path p10000 = assertWriteAndReadSequenceid(10000);
  // Get a .tableinfo that has no sequenceid suffix.
  Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_NAME);
  FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
  FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
  FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
  FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
  FSTableDescriptors.FileStatusFileNameComparator comparator =
      new FSTableDescriptors.FileStatusFileNameComparator();
  assertTrue(comparator.compare(fs, fs0) > 0);
  assertTrue(comparator.compare(fs0, fs2) > 0);
  assertTrue(comparator.compare(fs2, fs10000) > 0);
}
26. MultipleOutputFormat#getInputFileBasedOutputFileName()
/**
 * Generate the output file name based on a given name and the input file name. If
 * the map input file does not exist (i.e. this is not for a map-only job),
 * the given name is returned unchanged. If the config value for
 * "num.of.trailing.legs.to.use" is not set, or is set to 0 or negative, the given
 * name is returned unchanged. Otherwise, return a file name consisting of the
 * N trailing legs of the input file name, where N is the config value for
 * "num.of.trailing.legs.to.use".
 *
 * @param job the job config
 * @param name the output file name
 * @return the output file name based on the given name and the input file name.
 */
protected String getInputFileBasedOutputFileName(JobConf job, String name) {
  String infilepath = job.get("map.input.file");
  if (infilepath == null) {
    // if the map input file does not exist, then return the given name
    return name;
  }
  int numOfTrailingLegsToUse = job.getInt("mapred.outputformat.numOfTrailingLegs", 0);
  if (numOfTrailingLegsToUse <= 0) {
    return name;
  }
  Path infile = new Path(infilepath);
  Path parent = infile.getParent();
  String midName = infile.getName();
  Path outPath = new Path(midName);
  for (int i = 1; i < numOfTrailingLegsToUse; i++) {
    if (parent == null)
      break;
    midName = parent.getName();
    if (midName.length() == 0)
      break;
    parent = parent.getParent();
    outPath = new Path(midName, outPath);
  }
  return outPath.toString();
}
27. TestDistributedCache#setUp()
/**
 * @see TestCase#setUp()
 */
@Override
protected void setUp() throws IOException {
  conf = new Configuration();
  conf.setLong("local.cache.size", LOCAL_CACHE_LIMIT);
  conf.set("mapred.local.dir", MAPRED_LOCAL_DIR);
  conf.setLong("local.cache.numbersubdir", LOCAL_CACHE_FILES);
  FileUtil.fullyDelete(new File(TEST_CACHE_BASE_DIR));
  FileUtil.fullyDelete(new File(TEST_ROOT_DIR));
  localfs = FileSystem.get(LOCAL_FS, conf);
  firstCacheFile = new Path(TEST_ROOT_DIR + "/firstcachefile");
  secondCacheFile = new Path(TEST_ROOT_DIR + "/secondcachefile");
  thirdCacheFile = new Path(TEST_ROOT_DIR + "/thirdcachefile");
  fourthCacheFile = new Path(TEST_ROOT_DIR + "/fourthcachefile");
  createTempFile(localfs, firstCacheFile, 4 * 1024);
  createTempFile(localfs, secondCacheFile, 2 * 1024);
  createTempFile(localfs, thirdCacheFile, 1);
  createTempFile(localfs, fourthCacheFile, 1);
}
28. TestHarFileSystem#testSpaces()
@Test
public void testSpaces() throws Exception {
  fs.delete(archivePath, true);
  Configuration conf = mapred.createJobConf();
  HadoopArchives har = new HadoopArchives(conf);
  String[] args = new String[6];
  args[0] = "-archiveName";
  args[1] = "foo bar.har";
  args[2] = "-p";
  args[3] = fs.getHomeDirectory().toString();
  args[4] = "test";
  args[5] = archivePath.toString();
  int ret = ToolRunner.run(har, args);
  assertTrue("failed test", ret == 0);
  Path finalPath = new Path(archivePath, "foo bar.har");
  Path fsPath = new Path(inputPath.toUri().getPath());
  Path filePath = new Path(finalPath, "test");
  // make it a har path
  Path harPath = new Path("har://" + filePath.toUri().getPath());
  FileSystem harFs = harPath.getFileSystem(conf);
  FileStatus[] statuses = harFs.listStatus(finalPath);
}
29. TestDecommission#setup()
@Before
public void setup() throws IOException {
  conf = new Configuration();
  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/work-dir/decommission/");
  hostsFile = new Path(dir, "hosts");
  excludeFile = new Path(dir, "exclude");
  cleanFile(hostsFile);
  cleanFile(excludeFile);
  // Setup conf
  conf.setBoolean("dfs.replication.considerLoad", false);
  conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
  conf.setInt("heartbeat.recheck.interval", 2000);
  conf.setInt("dfs.heartbeat.interval", HEARTBEAT_INTERVAL);
  conf.setInt("dfs.replication.pending.timeout.sec", 4);
  writeConfigFile(excludeFile, null);
}
30. MultipleOutputFormat#getInputFileBasedOutputFileName()
/**
 * Generate the output file name based on a given name and the input file name. If
 * the map input file does not exist (i.e. this is not for a map-only job),
 * the given name is returned unchanged. If the config value for
 * "num.of.trailing.legs.to.use" is not set, or is set to 0 or negative, the given
 * name is returned unchanged. Otherwise, return a file name consisting of the
 * N trailing legs of the input file name, where N is the config value for
 * "num.of.trailing.legs.to.use".
 *
 * @param job the job config
 * @param name the output file name
 * @return the output file name based on the given name and the input file name.
 */
protected String getInputFileBasedOutputFileName(JobConf job, String name) {
  String infilepath = job.get("map.input.file");
  if (infilepath == null) {
    // if the map input file does not exist, then return the given name
    return name;
  }
  int numOfTrailingLegsToUse = job.getInt("mapred.outputformat.numOfTrailingLegs", 0);
  if (numOfTrailingLegsToUse <= 0) {
    return name;
  }
  Path infile = new Path(infilepath);
  Path parent = infile.getParent();
  String midName = infile.getName();
  Path outPath = new Path(midName);
  for (int i = 1; i < numOfTrailingLegsToUse; i++) {
    if (parent == null)
      break;
    midName = parent.getName();
    if (midName.length() == 0)
      break;
    parent = parent.getParent();
    outPath = new Path(midName, outPath);
  }
  return outPath.toString();
}
31. DataProviderFactoryTest#testGetVectorDataProviderForOverwriteWhenExists()
@Test
@Category(UnitTest.class)
public void testGetVectorDataProviderForOverwriteWhenExists() throws IOException {
  // Using an existing resource
  Path tmpDir = HadoopFileUtils.createUniqueTmp();
  Path testTsvPath = new Path(test_tsv);
  HadoopFileUtils.copyToHdfs(testTsvPath.getParent(), tmpDir, test_tsv_filename);
  HadoopFileUtils.copyToHdfs(testTsvPath.getParent(), tmpDir, test_tsv_columns_filename);
  Path hdfsTsvPath = new Path(tmpDir, test_tsv_filename);
  Path hdfsColumnsPath = new Path(tmpDir, test_tsv_columns_filename);
  try {
    VectorDataProvider dp = DataProviderFactory.getVectorDataProvider(hdfsTsvPath.toString(),
        AccessMode.OVERWRITE, providerProperties);
    Assert.assertNotNull(dp);
    Assert.assertFalse(HadoopFileUtils.exists(hdfsTsvPath));
    Assert.assertFalse(HadoopFileUtils.exists(hdfsColumnsPath));
  } finally {
    HadoopFileUtils.delete(tmpDir);
  }
}
32. TestCSVFileReader#createCSVFiles()
@BeforeClass
public static void createCSVFiles() throws IOException {
  localfs = LocalFileSystem.getInstance();
  csvFile = new Path("target/temp.csv");
  reorderedFile = new Path("target/reordered.csv");
  tsvFile = new Path("target/temp.tsv");
  validatorFile = new Path("target/validator.csv");
  FSDataOutputStream out = localfs.create(csvFile, true);
  out.writeBytes(CSV_CONTENT);
  out.close();
  out = localfs.create(reorderedFile, true);
  out.writeBytes(REORDERED_CSV_CONTENT);
  out.close();
  out = localfs.create(validatorFile, true);
  out.writeBytes(VALIDATOR_CSV_CONTENT);
  out.close();
  out = localfs.create(tsvFile, true);
  out.writeBytes(TSV_CONTENT);
  out.close();
}
33. TestFileSystemMetadataProvider#testCreateMetadataFiles()
@Test
public void testCreateMetadataFiles() throws IOException {
  ensureCreated();
  Path namedDirectory = new Path(testDirectory, new Path(NAMESPACE, NAME));
  Path metadataDirectory = new Path(namedDirectory, ".metadata");
  Path propertiesFile = new Path(metadataDirectory, "descriptor.properties");
  Path schemaDirectory = new Path(metadataDirectory, "schemas");
  Assert.assertTrue("Named directory should exist for name:" + NAME, fileSystem.exists(namedDirectory));
  Assert.assertTrue("Metadata directory should exist", fileSystem.exists(metadataDirectory));
  Assert.assertTrue("Descriptor properties file should exist", fileSystem.exists(propertiesFile));
  Assert.assertTrue("Descriptor schema directory should exist", fileSystem.exists(schemaDirectory));
}
34. TestFileSystemMetadataProvider#testDeleteRemovesMetadataFiles()
@Test
public void testDeleteRemovesMetadataFiles() throws IOException {
  testCreateMetadataFiles();
  DatasetDescriptor loaded = provider.load(NAMESPACE, NAME);
  Path namedDirectory = new Path(loaded.getLocation());
  Path metadataDirectory = new Path(namedDirectory, ".metadata");
  Path propertiesFile = new Path(metadataDirectory, "descriptor.properties");
  Path schemaDirectory = new Path(metadataDirectory, "schemas");
  boolean result = provider.delete(NAMESPACE, NAME);
  Assert.assertTrue(result);
  Assert.assertFalse("Descriptor properties file should not exist", fileSystem.exists(propertiesFile));
  Assert.assertFalse("Descriptor schema directory should not exist", fileSystem.exists(schemaDirectory));
  Assert.assertFalse("Metadata directory should not exist", fileSystem.exists(metadataDirectory));
  Assert.assertTrue("Named directory should still exist for name:" + NAME, fileSystem.exists(namedDirectory));
}
35. TestFileSystemMetadataProvider#testUpdatePreviousFormat()
@Test
public void testUpdatePreviousFormat() throws IOException {
  useOldRepositoryFormat();
  DatasetDescriptor oldFormatDescriptor = provider.load(NAMESPACE, NAME);
  Path namedDirectory = new Path(oldFormatDescriptor.getLocation());
  Path metadataDirectory = new Path(namedDirectory, ".metadata");
  Path schemaDirectory = new Path(metadataDirectory, "schemas");
  Path newSchemaLocation = new Path(schemaDirectory, "1.avsc");
  // Performing an update against a dataset in the old location should bring it
  // into the new location.
  DatasetDescriptor updated = new DatasetDescriptor.Builder(oldFormatDescriptor).build();
  provider.update(NAMESPACE, NAME, updated);
  Assert.assertEquals(testDescriptor.getSchema(), oldFormatDescriptor.getSchema());
  Assert.assertTrue("Schema should exist at the new location.", fileSystem.exists(newSchemaLocation));
}
36. TestTezCommonUtils#testTezDAGRecoveryStagingPath()
// Testing DAG specific recovery path staging dir
@Test
public void testTezDAGRecoveryStagingPath() throws Exception {
  String strAppId = "testAppId";
  Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
  Path recoveryPath = TezCommonUtils.getRecoveryPath(stageDir, conf);
  Path recoveryStageDir = TezCommonUtils.getAttemptRecoveryPath(recoveryPath, 2);
  Path dagRecoveryPathj = TezCommonUtils.getDAGRecoveryPath(recoveryStageDir, "dag_123");
  String expectedDir = RESOLVED_STAGE_DIR + File.separatorChar + TezCommonUtils.TEZ_SYSTEM_SUB_DIR
      + File.separatorChar + strAppId + File.separator + TezConfiguration.DAG_RECOVERY_DATA_DIR_NAME
      + File.separator + "2" + File.separator + "dag_123"
      + TezConfiguration.DAG_RECOVERY_RECOVER_FILE_SUFFIX;
  Assert.assertEquals(dagRecoveryPathj.toString(), expectedDir);
}
37. TestTezCommonUtils#testTezSummaryRecoveryStagingPath()
// Testing Summary recovery path staging dir
@Test
public void testTezSummaryRecoveryStagingPath() throws Exception {
  String strAppId = "testAppId";
  Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
  Path recoveryPath = TezCommonUtils.getRecoveryPath(stageDir, conf);
  Path recoveryStageDir = TezCommonUtils.getAttemptRecoveryPath(recoveryPath, 2);
  Path summaryRecoveryPathj = TezCommonUtils.getSummaryRecoveryPath(recoveryStageDir);
  String expectedDir = RESOLVED_STAGE_DIR + File.separatorChar + TezCommonUtils.TEZ_SYSTEM_SUB_DIR
      + File.separatorChar + strAppId + File.separator + TezConfiguration.DAG_RECOVERY_DATA_DIR_NAME
      + File.separator + "2" + File.separator + TezConfiguration.DAG_RECOVERY_SUMMARY_FILE_SUFFIX;
  Assert.assertEquals(summaryRecoveryPathj.toString(), expectedDir);
}
38. TestSimulatorEndToEnd#testMain()
@Test
public void testMain() throws Exception {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);
  final Path rootInputDir = new Path(System.getProperty("src.test.data", "data")).makeQualified(lfs);
  final Path traceFile = new Path(rootInputDir, "19-jobs.trace.json.gz");
  final Path topologyFile = new Path(rootInputDir, "19-jobs.topology.json.gz");
  LOG.info("traceFile = " + traceFile.toString() + " topology = " + topologyFile.toString());
  int numJobs = getNumberJobs(traceFile, conf);
  int nTrackers = getNumberTaskTrackers(topologyFile, conf);
  MockSimulatorEngine mockMumak = new MockSimulatorEngine(numJobs, nTrackers);
  String[] args = { traceFile.toString(), topologyFile.toString() };
  int res = ToolRunner.run(new Configuration(), mockMumak, args);
  Assert.assertEquals(res, 0);
}
39. HiveImport#getHiveBinPath()
/**
 * @return the filename of the hive executable to run to do the import
 */
private String getHiveBinPath() {
  // If the user has $HIVE_HOME set, then use $HIVE_HOME/bin/hive if it exists.
  // Fall back to just plain 'hive' and hope it's in the path.
  String hiveHome = options.getHiveHome();
  String hiveCommand = Shell.WINDOWS ? "hive.cmd" : "hive";
  if (null == hiveHome) {
    return hiveCommand;
  }
  Path p = new Path(hiveHome);
  p = new Path(p, "bin");
  p = new Path(p, hiveCommand);
  String hiveBinStr = p.toString();
  if (new File(hiveBinStr).exists()) {
    return hiveBinStr;
  } else {
    return hiveCommand;
  }
}
40. ConsistentListingAspectTest#testThreshold()
@Test
public void testThreshold() throws Exception {
  Path p1 = new Path(testPath.toUri() + "/deleteMarkerListing-1.test");
  Path p2 = new Path(testPath.toUri() + "/deleteMarkerListing-2.test");
  Path p3 = new Path(testPath.toUri() + "/deleteMarkerListing-3.test");
  deleteFs.create(p1).close();
  deleteFs.create(p2).close();
  meta.add(p3, false);
  conf.setFloat("s3mper.listing.threshold", 0.5f);
  System.out.println("Waiting for s3 . . .");
  Thread.sleep(10000);
  try {
    FileStatus[] files = deleteFs.listStatus(testPath);
    assertEquals("Didn't list the correct number of files", 2, files.length);
  } catch (Exception e) {
    fail("Threshold not met, but should have been");
  }
}
41. ConsistentListingAspectTest#testDeleteMarkerListing()
@Test
public void testDeleteMarkerListing() throws Exception {
  Path p1 = new Path(testPath.toUri() + "/deleteMarkerListing-1.test");
  Path p2 = new Path(testPath.toUri() + "/deleteMarkerListing-2.test");
  Path p3 = new Path(testPath.toUri() + "/deleteMarkerListing-3.test");
  deleteFs.create(p1).close();
  deleteFs.create(p2).close();
  deleteFs.create(p3).close();
  deleteFs.delete(p1, false);
  deleteFs.delete(p3, false);
  assertEquals("Wrong number of fs listed files", 1, deleteFs.listStatus(testPath).length);
  assertEquals("Wrong number of metastore listed files", 3,
      meta.list(Collections.singletonList(testPath)).size());
}
42. AbstractTestHiveClientS3#testGetFileStatus()
@Test
public void testGetFileStatus() throws Exception {
  Path basePath = new Path("s3://presto-test-hive/");
  Path tablePath = new Path(basePath, "presto_test_s3");
  Path filePath = new Path(tablePath, "test1.csv");
  FileSystem fs = hdfsEnvironment.getFileSystem("user", basePath);
  assertTrue(isDirectory(fs.getFileStatus(basePath)));
  assertTrue(isDirectory(fs.getFileStatus(tablePath)));
  assertFalse(isDirectory(fs.getFileStatus(filePath)));
  assertFalse(fs.exists(new Path(basePath, "foo")));
}
43. TestMapReduceActionExecutor#testStreaming()
public void testStreaming() throws Exception {
  FileSystem fs = getFileSystem();
  Path streamingJar = new Path(getFsTestCaseDir(), "jar/hadoop-streaming.jar");
  InputStream is = new FileInputStream(ClassUtils.findContainingJar(StreamJob.class));
  OutputStream os = fs.create(new Path(getAppPath(), streamingJar));
  IOUtils.copyStream(is, os);
  Path inputDir = new Path(getFsTestCaseDir(), "input");
  Path outputDir = new Path(getFsTestCaseDir(), "output");
  Writer w = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  w.write("dummy\n");
  w.write("dummy\n");
  w.close();
  String actionXml = "<map-reduce>"
      + "<job-tracker>" + getJobTrackerUri() + "</job-tracker>"
      + "<name-node>" + getNameNodeUri() + "</name-node>"
      + " <streaming>"
      + " <mapper>cat</mapper>"
      + " <reducer>wc</reducer>"
      + " </streaming>"
      + getStreamingConfig(inputDir.toString(), outputDir.toString()).toXmlString(false)
      + "<file>" + streamingJar + "</file>"
      + "</map-reduce>";
  _testSubmit("streaming", actionXml);
}
44. FileSplitTest#testFindSplitFileOldVersion()
@Test
@Category(UnitTest.class)
public void testFindSplitFileOldVersion() throws Exception {
  Path parent = new Path(FileSplitTest.class.getName() + "-testRootPath");
  Path newSplitsFile = new Path(parent, "splits");
  Path oldSplitsFile = new Path(parent, "splits.txt");
  FileSplit spySubject = new FileSplit();
  subject = spy(spySubject);
  doReturn(false).when(subject).fileExists(newSplitsFile);
  doReturn(true).when(subject).fileExists(oldSplitsFile);
  Assert.assertEquals(oldSplitsFile.toString(), subject.findSplitFile(parent).toString());
}
45. TestTezCommonUtils#testTezAttemptRecoveryStagingPath()
// Testing app attempt specific recovery path staging dir
@Test
public void testTezAttemptRecoveryStagingPath() throws Exception {
  String strAppId = "testAppId";
  Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
  Path recoveryPath = TezCommonUtils.getRecoveryPath(stageDir, conf);
  Path recoveryStageDir = TezCommonUtils.getAttemptRecoveryPath(recoveryPath, 2);
  String expectedDir = RESOLVED_STAGE_DIR + File.separatorChar + TezCommonUtils.TEZ_SYSTEM_SUB_DIR
      + File.separatorChar + strAppId + File.separator + TezConfiguration.DAG_RECOVERY_DATA_DIR_NAME
      + File.separator + "2";
  Assert.assertEquals(recoveryStageDir.toString(), expectedDir);
}
46. IgniteHadoopFileSystemAbstractSelfTest#testMkdirs()
/** @throws Exception If failed. */
@SuppressWarnings("OctalInteger")
public void testMkdirs() throws Exception {
  Path fsHome = new Path(PRIMARY_URI);
  final Path dir = new Path(fsHome, "/tmp/staging");
  final Path nestedDir = new Path(dir, "nested");
  final FsPermission dirPerm = FsPermission.createImmutable((short) 0700);
  final FsPermission nestedDirPerm = FsPermission.createImmutable((short) 111);
  assertTrue(fs.mkdirs(dir, dirPerm));
  assertTrue(fs.mkdirs(nestedDir, nestedDirPerm));
  assertEquals(dirPerm, fs.getFileStatus(dir).getPermission());
  assertEquals(nestedDirPerm, fs.getFileStatus(nestedDir).getPermission());
  assertEquals(getClientFsUser(), fs.getFileStatus(dir).getOwner());
  assertEquals(getClientFsUser(), fs.getFileStatus(nestedDir).getOwner());
}
47. IgniteHadoopFileSystemAbstractSelfTest#testRenameDirectory()
/** @throws Exception If failed. */
public void testRenameDirectory() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path dir = new Path(fsHome, "/tmp/");
  Path newDir = new Path(fsHome, "/tmpNew/");
  FSDataOutputStream os = fs.create(new Path(dir, "myFile"));
  os.close();
  assertTrue("Rename failed [dir=" + dir + ", newDir=" + newDir + ']', fs.rename(dir, newDir));
  assertPathDoesNotExist(fs, dir);
  assertPathExists(fs, newDir);
}
48. IgniteHadoopFileSystemAbstractSelfTest#testRenameDirectoryIfDstPathExists()
/** @throws Exception If failed. */
public void testRenameDirectoryIfDstPathExists() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path srcDir = new Path(fsHome, "/tmp/");
  Path dstDir = new Path(fsHome, "/tmpNew/");
  FSDataOutputStream os = fs.create(new Path(srcDir, "file1"));
  os.close();
  os = fs.create(new Path(dstDir, "file2"));
  os.close();
  assertTrue("Rename succeeded [srcDir=" + srcDir + ", dstDir=" + dstDir + ']',
      fs.rename(srcDir, dstDir));
  assertPathExists(fs, dstDir);
  assertPathExists(fs, new Path(fsHome, "/tmpNew/tmp"));
  assertPathExists(fs, new Path(fsHome, "/tmpNew/tmp/file1"));
}
49. IgniteHadoopFileSystemAbstractSelfTest#testRenameFile()
/** @throws Exception If failed. */
public void testRenameFile() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path srcFile = new Path(fsHome, "/tmp/srcFile");
  Path dstFile = new Path(fsHome, "/tmp/dstFile");
  FSDataOutputStream os = fs.create(srcFile);
  os.close();
  assertTrue(fs.rename(srcFile, dstFile));
  assertPathDoesNotExist(fs, srcFile);
  assertPathExists(fs, dstFile);
}
50. IgniteHadoopFileSystemAbstractSelfTest#testRenameFileIfDstPathExists()
/** @throws Exception If failed. */
public void testRenameFileIfDstPathExists() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path srcFile = new Path(fsHome, "srcFile");
  Path dstFile = new Path(fsHome, "dstFile");
  FSDataOutputStream os = fs.create(srcFile);
  os.close();
  os = fs.create(dstFile);
  os.close();
  assertFalse(fs.rename(srcFile, dstFile));
  assertPathExists(fs, srcFile);
  assertPathExists(fs, dstFile);
}
51. IgniteHadoopFileSystemAbstractSelfTest#testAppendIfPathPointsToDirectory()
/** @throws Exception If failed. */
public void testAppendIfPathPointsToDirectory() throws Exception {
  final Path fsHome = new Path(primaryFsUri);
  final Path dir = new Path(fsHome, "/tmp");
  Path file = new Path(dir, "my");
  FSDataOutputStream os = fs.create(file);
  os.close();
  GridTestUtils.assertThrowsInherited(log, new Callable<Object>() {
    @Override public Object call() throws Exception {
      return fs.append(new Path(fsHome, dir), 1024);
    }
  }, IOException.class, null);
}
52. IgniteHadoopFileSystemAbstractSelfTest#testSetOwnerCheckNonRecursiveness()
/** @throws Exception If failed. */
public void testSetOwnerCheckNonRecursiveness() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path file = new Path(fsHome, "/tmp/my");
  FSDataOutputStream os = fs.create(file);
  os.close();
  Path tmpDir = new Path(fsHome, "/tmp");
  fs.setOwner(file, "fUser", "fGroup");
  fs.setOwner(tmpDir, "dUser", "dGroup");
  assertEquals("dUser", fs.getFileStatus(tmpDir).getOwner());
  assertEquals("dGroup", fs.getFileStatus(tmpDir).getGroup());
  assertEquals("fUser", fs.getFileStatus(file).getOwner());
  assertEquals("fGroup", fs.getFileStatus(file).getGroup());
}
53. TestWrappedRRClassloader#testClassLoader()
/**
 * Tests the class loader set by
 * {@link Configuration#setClassLoader(ClassLoader)}
 * is inherited by any {@link WrappedRecordReader}s created by
 * {@link CompositeRecordReader}
 */
public void testClassLoader() throws Exception {
  Configuration conf = new Configuration();
  Fake_ClassLoader classLoader = new Fake_ClassLoader();
  conf.setClassLoader(classLoader);
  assertTrue(conf.getClassLoader() instanceof Fake_ClassLoader);
  FileSystem fs = FileSystem.get(conf);
  Path testdir = new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(fs);
  Path base = new Path(testdir, "/empty");
  Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") };
  conf.set(CompositeInputFormat.JOIN_EXPR,
      CompositeInputFormat.compose("outer", IF_ClassLoaderChecker.class, src));
  CompositeInputFormat<NullWritable> inputFormat = new CompositeInputFormat<NullWritable>();
  // create dummy TaskAttemptID
  TaskAttemptID tid = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
  conf.set(JobContext.TASK_ATTEMPT_ID, tid.toString());
  inputFormat.createRecordReader(inputFormat.getSplits(new Job(conf)).get(0),
      new TaskAttemptContextImpl(conf, tid));
}
54. TestMultipleInputs#setUp()
@Before
public void setUp() throws Exception {
  super.setUp();
  Path rootDir = getDir(ROOT_DIR);
  Path in1Dir = getDir(IN1_DIR);
  Path in2Dir = getDir(IN2_DIR);
  Configuration conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);
  fs.delete(rootDir, true);
  if (!fs.mkdirs(in1Dir)) {
    throw new IOException("Mkdirs failed to create " + in1Dir.toString());
  }
  if (!fs.mkdirs(in2Dir)) {
    throw new IOException("Mkdirs failed to create " + in2Dir.toString());
  }
}
55. TestTaskCommit#testCommitFail()
public void testCommitFail() throws IOException {
  Path rootDir = new Path(System.getProperty("test.build.data", "/tmp"), "test");
  final Path inDir = new Path(rootDir, "./input");
  final Path outDir = new Path(rootDir, "./output");
  JobConf jobConf = createJobConf();
  jobConf.setMaxMapAttempts(1);
  jobConf.setOutputCommitter(CommitterWithCommitFail.class);
  RunningJob rJob = UtilsForTests.runJob(jobConf, inDir, outDir, 1, 0);
  rJob.waitForCompletion();
  assertEquals(JobStatus.FAILED, rJob.getJobState());
}
56. TestWrappedRecordReaderClassloader#testClassLoader()
/**
 * Tests the class loader set by {@link JobConf#setClassLoader(ClassLoader)}
 * is inherited by any {@link WrappedRecordReader}s created by
 * {@link CompositeRecordReader}
 */
public void testClassLoader() throws Exception {
  JobConf job = new JobConf();
  Fake_ClassLoader classLoader = new Fake_ClassLoader();
  job.setClassLoader(classLoader);
  assertTrue(job.getClassLoader() instanceof Fake_ClassLoader);
  FileSystem fs = FileSystem.get(job);
  Path testdir = new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(fs);
  Path base = new Path(testdir, "/empty");
  Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") };
  job.set("mapreduce.join.expr",
      CompositeInputFormat.compose("outer", IF_ClassLoaderChecker.class, src));
  CompositeInputFormat<NullWritable> inputFormat = new CompositeInputFormat<NullWritable>();
  inputFormat.getRecordReader(inputFormat.getSplits(job, 1)[0], job, Reporter.NULL);
}
57. TestHLogSplit#testRecoveredEditsPathForMeta()
/**
 * @throws IOException
 * @see https://issues.apache.org/jira/browse/HBASE-3020
 */
@Test
public void testRecoveredEditsPathForMeta() throws IOException {
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  byte[] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
  Path tdir = new Path(hbaseDir, Bytes.toString(HConstants.META_TABLE_NAME));
  Path regiondir = new Path(tdir, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
  fs.mkdirs(regiondir);
  long now = System.currentTimeMillis();
  HLog.Entry entry = new HLog.Entry(
      new HLogKey(encoded, HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID),
      new WALEdit());
  Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, hbaseDir, true);
  String parentOfParent = p.getParent().getParent().getName();
  assertEquals(parentOfParent, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
}
58. TestStore#init()
private void init(String methodName, Configuration conf, HColumnDescriptor hcd) throws IOException {
  // Setting up a Store
  Path basedir = new Path(DIR + methodName);
  Path logdir = new Path(DIR + methodName + "/logs");
  Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
  FileSystem fs = FileSystem.get(conf);
  fs.delete(logdir, true);
  HTableDescriptor htd = new HTableDescriptor(table);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
  HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
  store = new Store(basedir, region, hcd, fs, conf);
}
59. NutchData#generate()
/*
private void test2LevelMapFile(Path furl) throws IOException {
  JobConf job = new JobConf();
  FileSystem fs = FileSystem.get(job);
  MapFile.Reader reader = new MapFile.Reader(fs, furl.toString(), job);
  Text value = new Text();
  reader.get(new LongWritable(1000), value);
  if (null != value) {
    log.info("---Find it: <1000, " + value + ">");
  }
}
*/
public void generate() throws Exception {
  init();
  createNutchUrls();
  createNutchIndexData();
  Path ffetch = new Path(options.getResultPath(), CrawlDatum.FETCH_DIR_NAME);
  Path fparse = new Path(options.getResultPath(), CrawlDatum.PARSE_DIR_NAME);
  Path linkdb = new Path(segment, LINKDB_DIR_NAME);
  FileSystem fs = ffetch.getFileSystem(new Configuration());
  fs.rename(ffetch, new Path(segment, CrawlDatum.FETCH_DIR_NAME));
  fs.rename(fparse, new Path(segment, CrawlDatum.PARSE_DIR_NAME));
  fs.rename(linkdb, new Path(options.getResultPath(), LINKDB_DIR_NAME));
  fs.close();
  close();
}
60. HiveImport#getHiveBinPath()
/**
 * @return the filename of the hive executable to run to do the import
 */
private String getHiveBinPath() {
  // If the user has $HIVE_HOME set, then use $HIVE_HOME/bin/hive if it exists.
  // Fall back to just plain 'hive' and hope it's in the path.
  String hiveHome = options.getHiveHome();
  if (null == hiveHome) {
    return "hive";
  }
  Path p = new Path(hiveHome);
  p = new Path(p, "bin");
  p = new Path(p, "hive");
  String hiveBinStr = p.toString();
  if (new File(hiveBinStr).exists()) {
    return hiveBinStr;
  } else {
    return "hive";
  }
}
61. TestDFSShell#createTree()
static String createTree(FileSystem fs, String name) throws IOException {
  // create a tree
  //   ROOT
  //   |- f1
  //   |- f2
  //   + sub
  //      |- f3
  //      |- f4
  //   ROOT2
  //   |- f1
  String path = "/test/" + name;
  Path root = mkdir(fs, new Path(path));
  Path sub = mkdir(fs, new Path(root, "sub"));
  Path root2 = mkdir(fs, new Path(path + "2"));
  writeFile(fs, new Path(root, "f1"));
  writeFile(fs, new Path(root, "f2"));
  writeFile(fs, new Path(sub, "f3"));
  writeFile(fs, new Path(sub, "f4"));
  writeFile(fs, new Path(root2, "f1"));
  mkdir(fs, new Path(root2, "sub"));
  return path;
}
62. IgniteHadoopFileSystemAbstractSelfTest#testSetPermissionCheckNonRecursiveness()
/** @throws Exception If failed. */
@SuppressWarnings("deprecation")
public void testSetPermissionCheckNonRecursiveness() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path file = new Path(fsHome, "/tmp/my");
  FSDataOutputStream os = fs.create(file, FsPermission.getDefault(), false, 64 * 1024,
      fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
  os.close();
  Path tmpDir = new Path(fsHome, "/tmp");
  FsPermission perm = new FsPermission((short) 123);
  fs.setPermission(tmpDir, perm);
  assertEquals(perm, fs.getFileStatus(tmpDir).getPermission());
  assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
}
63. IgniteHadoopFileSystemAbstractSelfTest#testDeleteRecursivelyFromRoot()
/** @throws Exception If failed. */
public void testDeleteRecursivelyFromRoot() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
  FSDataOutputStream os = fs.create(someDir3);
  os.close();
  Path root = new Path(fsHome, "/");
  assertFalse(fs.delete(root, true));
  assertTrue(fs.delete(new Path("/someDir1"), true));
  assertPathDoesNotExist(fs, someDir3);
  assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
  assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
  assertPathExists(fs, root);
}
64. IgniteHadoopFileSystemAbstractSelfTest#testDeleteRecursively()
/** @throws Exception If failed. */
public void testDeleteRecursively() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
  FSDataOutputStream os = fs.create(someDir3);
  os.close();
  Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
  assertTrue(fs.delete(someDir2, true));
  assertPathDoesNotExist(fs, someDir2);
  assertPathDoesNotExist(fs, someDir3);
}
65. IgniteHadoopFileSystemAbstractSelfTest#testDeleteFailsIfNonRecursive()
/** @throws Exception If failed. */
public void testDeleteFailsIfNonRecursive() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
  fs.create(someDir3).close();
  Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
  assertFalse(fs.delete(someDir2, false));
  assertPathExists(fs, someDir2);
  assertPathExists(fs, someDir3);
}
66. IgniteHadoopFileSystemAbstractSelfTest#testCreateBase()
/** @throws Exception If failed. */
@SuppressWarnings("deprecation")
public void testCreateBase() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
  Path file = new Path(dir, "someFile");
  assertPathDoesNotExist(fs, file);
  FsPermission fsPerm = new FsPermission((short) 644);
  FSDataOutputStream os = fs.create(file, fsPerm, false, 1, (short) 1, 1L, null);
  // Try to write something in file.
  os.write("abc".getBytes());
  os.close();
  // Check file status.
  FileStatus fileStatus = fs.getFileStatus(file);
  assertFalse(fileStatus.isDir());
  assertEquals(file, fileStatus.getPath());
  assertEquals(fsPerm, fileStatus.getPermission());
}
67. HadoopIgfs20FileSystemAbstractSelfTest#testMkdirs()
/** @throws Exception If failed. */
@SuppressWarnings("OctalInteger")
public void testMkdirs() throws Exception {
  Path fsHome = new Path(primaryFileSystemUriPath());
  Path dir = new Path(fsHome, "/tmp/staging");
  Path nestedDir = new Path(dir, "nested");
  FsPermission dirPerm = FsPermission.createImmutable((short) 0700);
  FsPermission nestedDirPerm = FsPermission.createImmutable((short) 111);
  fs.mkdir(dir, dirPerm, true);
  fs.mkdir(nestedDir, nestedDirPerm, true);
  assertEquals(dirPerm, fs.getFileStatus(dir).getPermission());
  assertEquals(nestedDirPerm, fs.getFileStatus(nestedDir).getPermission());
  assertEquals(getClientFsUser(), fs.getFileStatus(dir).getOwner());
  assertEquals(getClientFsUser(), fs.getFileStatus(nestedDir).getOwner());
}
68. HadoopIgfs20FileSystemAbstractSelfTest#testRenameDirectory()
/** @throws Exception If failed. */
public void testRenameDirectory() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path dir = new Path(fsHome, "/tmp/");
  Path newDir = new Path(fsHome, "/tmpNew/");
  FSDataOutputStream os = fs.create(new Path(dir, "myFile"), EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()));
  os.close();
  fs.rename(dir, newDir);
  assertPathDoesNotExist(fs, dir);
  assertPathExists(fs, newDir);
}
69. HadoopIgfs20FileSystemAbstractSelfTest#testRenameFile()
/** @throws Exception If failed. */
public void testRenameFile() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path srcFile = new Path(fsHome, "/tmp/srcFile");
  Path dstFile = new Path(fsHome, "/tmp/dstFile");
  FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()));
  os.close();
  fs.rename(srcFile, dstFile);
  assertPathDoesNotExist(fs, srcFile);
  assertPathExists(fs, dstFile);
}
70. HadoopIgfs20FileSystemAbstractSelfTest#testRenameIfSrcPathDoesNotExist()
/** @throws Exception If failed. */
public void testRenameIfSrcPathDoesNotExist() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  final Path srcFile = new Path(fsHome, "srcFile");
  final Path dstFile = new Path(fsHome, "dstFile");
  assertPathDoesNotExist(fs, srcFile);
  GridTestUtils.assertThrows(log, new Callable<Object>() {
    @Override public Object call() throws Exception {
      fs.rename(srcFile, dstFile);
      return null;
    }
  }, FileNotFoundException.class, null);
  assertPathDoesNotExist(fs, dstFile);
}
71. HadoopIgfs20FileSystemAbstractSelfTest#testAppendIfPathPointsToDirectory()
/** @throws Exception If failed. */
public void testAppendIfPathPointsToDirectory() throws Exception {
    final Path fsHome = new Path(primaryFsUri);
    final Path dir = new Path(fsHome, "/tmp");
    Path file = new Path(dir, "my");
    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    GridTestUtils.assertThrowsInherited(log, new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            return fs.create(new Path(fsHome, dir), EnumSet.of(CreateFlag.APPEND),
                Options.CreateOpts.perms(FsPermission.getDefault()));
        }
    }, IOException.class, null);
}
72. HadoopIgfs20FileSystemAbstractSelfTest#testSetOwnerCheckNonRecursiveness()
/** @throws Exception If failed. */
public void testSetOwnerCheckNonRecursiveness() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    Path tmpDir = new Path(fsHome, "/tmp");
    fs.setOwner(file, "fUser", "fGroup");
    fs.setOwner(tmpDir, "dUser", "dGroup");
    assertEquals("dUser", fs.getFileStatus(tmpDir).getOwner());
    assertEquals("dGroup", fs.getFileStatus(tmpDir).getGroup());
    assertEquals("fUser", fs.getFileStatus(file).getOwner());
    assertEquals("fGroup", fs.getFileStatus(file).getGroup());
}
73. HadoopIgfs20FileSystemAbstractSelfTest#testSetPermissionCheckNonRecursiveness()
/** @throws Exception If failed. */
public void testSetPermissionCheckNonRecursiveness() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    Path tmpDir = new Path(fsHome, "/tmp");
    FsPermission perm = new FsPermission((short) 123);
    fs.setPermission(tmpDir, perm);
    assertEquals(perm, fs.getFileStatus(tmpDir).getPermission());
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
}
74. HadoopIgfs20FileSystemAbstractSelfTest#testDeleteRecursivelyFromRoot()
/** @throws Exception If failed. */
public void testDeleteRecursivelyFromRoot() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    Path root = new Path(fsHome, "/");
    assertFalse(fs.delete(root, true));
    assertTrue(fs.delete(new Path(fsHome, "/someDir1"), true));
    assertPathDoesNotExist(fs, someDir3);
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
    assertPathExists(fs, root);
}
75. HadoopIgfs20FileSystemAbstractSelfTest#testDeleteRecursively()
/** @throws Exception If failed. */
public void testDeleteRecursively() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
    assertTrue(fs.delete(someDir2, true));
    assertPathDoesNotExist(fs, someDir2);
    assertPathDoesNotExist(fs, someDir3);
}
76. HadoopIgfs20FileSystemAbstractSelfTest#testDeleteFailsIfNonRecursive()
/** @throws Exception If failed. */
public void testDeleteFailsIfNonRecursive() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));
    os.close();
    final Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            fs.delete(someDir2, false);
            return null;
        }
    }, PathIsNotEmptyDirectoryException.class, null);
    assertPathExists(fs, someDir2);
    assertPathExists(fs, someDir3);
}
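Note on example 76: this FileContext-style test expects a non-recursive delete of a non-empty directory to throw PathIsNotEmptyDirectoryException, while the similar FileSystem-based test earlier in this list only asserts that delete(..., false) returns false. How the refusal is reported depends on the FileSystem implementation. A minimal sketch against the local file system, written to tolerate either behavior (paths under target/ are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NonRecursiveDeleteSketch {
    public static void main(String[] args) throws Exception {
        // Local file system, used only to exercise the API shape.
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path dir = new Path("target/sketch/someDir1/someDir2");
        fs.mkdirs(new Path(dir, "someDir3"));
        try {
            // Non-recursive delete of a non-empty directory: some implementations
            // return false, others throw an IOException subclass.
            System.out.println(fs.delete(dir, false));
        } catch (java.io.IOException e) {
            System.out.println("delete refused: " + e);
        } finally {
            fs.delete(new Path("target/sketch"), true);
        }
    }
}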
77. TestFileLister#testGetJobIdFromPath()
@Test
public void testGetJobIdFromPath() {
    String JOB_HISTORY_FILE_NAME = "src/test/resources/job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
    File jobHistoryfile = new File(JOB_HISTORY_FILE_NAME);
    Path srcPath = new Path(jobHistoryfile.toURI());
    String jobId = FileLister.getJobIdFromPath(srcPath);
    String expJobId = "job_1329348432655_0001";
    assertEquals(expJobId, jobId);
    String JOB_CONF_FILE_NAME = "src/test/resources/job_1329348432655_0001_conf.xml";
    File jobConfFile = new File(JOB_CONF_FILE_NAME);
    srcPath = new Path(jobConfFile.toURI());
    jobId = FileLister.getJobIdFromPath(srcPath);
    assertEquals(expJobId, jobId);
    jobConfFile = new File("job_201311192236_3583_1386370578196_user1_Sleep+job");
    srcPath = new Path(jobConfFile.toURI());
    jobId = FileLister.getJobIdFromPath(srcPath);
    expJobId = "job_201311192236_3583";
    assertEquals(expJobId, jobId);
}
78. TestCSVFileReader#createCSVFiles()
@BeforeClass
public static void createCSVFiles() throws IOException {
    localfs = FileSystem.getLocal(new Configuration());
    csvFile = new Path("target/temp.csv");
    tsvFile = new Path("target/temp.tsv");
    validatorFile = new Path("target/validator.csv");
    FSDataOutputStream out = localfs.create(csvFile, true);
    out.writeBytes(CSV_CONTENT);
    out.close();
    out = localfs.create(validatorFile, true);
    out.writeBytes(VALIDATOR_CSV_CONTENT);
    out.close();
    out = localfs.create(tsvFile, true);
    out.writeBytes(TSV_CONTENT);
    out.close();
}
79. PailInputSplit#setRelPath()
private void setRelPath(FileSystem fs, String root) {
    Path filePath = super.getPath();
    filePath = filePath.makeQualified(fs);
    Path rootPath = new Path(root).makeQualified(fs);
    List<String> dirs = new LinkedList<String>();
    Path curr = filePath.getParent();
    while (!curr.equals(rootPath)) {
        dirs.add(0, curr.getName());
        curr = curr.getParent();
        if (curr == null)
            throw new IllegalArgumentException(filePath.toString() + " is not a subpath of " + rootPath.toString());
    }
    _relPath = Utils.join(dirs, "/");
}
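Note on example 79: the loop terminates only because the qualified file path eventually walks up to the qualified root; Path#getParent() returns null once the walk passes the filesystem root, which is what the null check guards against for paths outside the root. A minimal, self-contained sketch of the same parent-walking idea (hypothetical paths, no FileSystem or Pail classes involved):

import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.fs.Path;

public class RelativePathSketch {

    // Collect the directory names between root and the file's parent, top-down.
    static String relativeDirs(Path file, Path root) {
        List<String> dirs = new LinkedList<>();
        Path curr = file.getParent();
        while (!curr.equals(root)) {
            dirs.add(0, curr.getName());
            curr = curr.getParent();
            if (curr == null) {
                throw new IllegalArgumentException(file + " is not a subpath of " + root);
            }
        }
        return String.join("/", dirs);
    }

    public static void main(String[] args) {
        // Hypothetical layout: a pail root containing year/month partitions.
        System.out.println(relativeDirs(new Path("/pail/2020/01/part-0"), new Path("/pail")));
        // prints: 2020/01
    }
}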
80. PailTap#commitResource()
@Override
public boolean commitResource(JobConf conf) throws IOException {
    Pail p = Pail.create(_pailRoot, ((PailScheme) getScheme()).getSpec(), false);
    FileSystem fs = p.getFileSystem();
    Path tmpPath = new Path(_pailRoot, "_temporary");
    if (fs.exists(tmpPath)) {
        LOG.info("Deleting _temporary directory left by Hadoop job: " + tmpPath.toString());
        fs.delete(tmpPath, true);
    }
    Path tmp2Path = new Path(_pailRoot, "_temporary2");
    if (fs.exists(tmp2Path)) {
        LOG.info("Deleting _temporary2 directory: " + tmp2Path.toString());
        fs.delete(tmp2Path, true);
    }
    Path logPath = new Path(_pailRoot, "_logs");
    if (fs.exists(logPath)) {
        LOG.info("Deleting _logs directory left by Hadoop job: " + logPath.toString());
        fs.delete(logPath, true);
    }
    return true;
}
81. HadoopFileCacheRepository#computeCachePath()
private Path computeCachePath(Path file) {
    assert repository != null;
    String directoryName;
    Path parent = file.getParent();
    if (parent == null) {
        directoryName = String.format("%08x", 0); //$NON-NLS-1$
    } else {
        directoryName = String.format("%08x", parent.toString().hashCode()); //$NON-NLS-1$
    }
    Path directory = new Path(repository, directoryName);
    Path target = new Path(directory, file.getName());
    return target;
}
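Note on example 81: the cache layout is keyed off the hash of the parent directory, and Path#getParent() returning null (which only happens for a root path) is mapped to bucket 00000000. A minimal sketch of the same bucketing scheme, using hypothetical repository and input paths:

import org.apache.hadoop.fs.Path;

public class CachePathSketch {

    // Bucket a file under repository/<hash-of-parent-dir>/<file-name>.
    static Path computeCachePath(Path repository, Path file) {
        Path parent = file.getParent();
        String bucket = String.format("%08x", parent == null ? 0 : parent.toString().hashCode());
        return new Path(new Path(repository, bucket), file.getName());
    }

    public static void main(String[] args) {
        // Hypothetical repository root and input file.
        Path repository = new Path("hdfs:///tmp/cache-repo");
        Path file = new Path("/user/data/input/part-00000");
        System.out.println(computeCachePath(repository, file));
    }
}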
82. TemporaryOutputRetriever#truncate()
@Override
public void truncate(TemporaryOutputDescription description, TestContext context) throws IOException {
    LOG.debug("Deleting output directory: {}", description); //$NON-NLS-1$
    VariableTable variables = createVariables(context);
    Configuration config = configurations.newInstance();
    FileSystem fs = FileSystem.get(config);
    String resolved = variables.parse(description.getPathPrefix(), false);
    Path path = new Path(resolved);
    Path output = path.getParent();
    Path target;
    if (output == null) {
        LOG.warn(MessageFormat.format( //$NON-NLS-1$
            Messages.getString("TemporaryOutputRetriever.warnDeleteBaseDirectory"),
            path));
        target = fs.makeQualified(path);
    } else {
        LOG.debug("output directory will be deleted: {}", output); //$NON-NLS-1$
        target = fs.makeQualified(output);
    }
    TemporaryInputPreparator.delete(fs, target);
}
83. WindGateHadoopGetTest#multiple()
/**
 * Gets multiple files.
 * @throws Exception if failed
 */
@Test
public void multiple() throws Exception {
    Path path1 = new Path(PREFIX, "testing-1");
    Path path2 = new Path(PREFIX, "testing-2");
    Path path3 = new Path(PREFIX, "testing-3");
    put(path1, "Hello1, world!");
    put(path2, "Hello2, world!");
    put(path3, "Hello3, world!");
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    int result = new WindGateHadoopGet(conf).execute(buffer, path1.toString(), path2.toString(), path3.toString());
    assertThat(result, is(0));
    Map<String, String> contents = get(buffer.toByteArray());
    assertThat(contents.size(), is(3));
    assertThat(contents.get("testing-1"), is("Hello1, world!"));
    assertThat(contents.get("testing-2"), is("Hello2, world!"));
    assertThat(contents.get("testing-3"), is("Hello3, world!"));
}
84. WindGateHadoopGetTest#glob()
/**
 * Gets multiple files using glob.
 * @throws Exception if failed
 */
@Test
public void glob() throws Exception {
    Path path1 = new Path(PREFIX, "testing-1");
    Path path2 = new Path(PREFIX, "testing-2");
    Path path3 = new Path(PREFIX, "testing-3");
    put(path1, "Hello1, world!");
    put(path2, "Hello2, world!");
    put(path3, "Hello3, world!");
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    int result = new WindGateHadoopGet(conf).execute(buffer, new Path(PREFIX, "testing-*").toString());
    assertThat(result, is(0));
    Map<String, String> contents = get(buffer.toByteArray());
    assertThat(contents.size(), is(3));
    assertThat(contents.get("testing-1"), is("Hello1, world!"));
    assertThat(contents.get("testing-2"), is("Hello2, world!"));
    assertThat(contents.get("testing-3"), is("Hello3, world!"));
}
85. UnitTestContext#prepare()
protected void prepare(String workflow) throws Exception {
    mkdir(fs, new Path("/falcon"), new FsPermission((short) 511));
    Path wfParent = new Path("/falcon/test");
    fs.delete(wfParent, true);
    Path wfPath = new Path(wfParent, "workflow");
    mkdir(fs, wfPath);
    mkdir(fs, new Path("/falcon/test/workflow/lib"));
    fs.copyFromLocalFile(false, true, new Path(TestContext.class.getResource("/" + workflow).getPath()),
        new Path(wfPath, "workflow.xml"));
    mkdir(fs, new Path(wfParent, "input/2012/04/20/00"));
    mkdir(fs, new Path(wfParent, "input/2012/04/21/00"));
    Path outPath = new Path(wfParent, "output");
    mkdir(fs, outPath, new FsPermission((short) 511));
}
86. CopyableFile#builder()
/**
 * Get a {@link CopyableFile.Builder}.
 *
 * @param originFs {@link FileSystem} where original file exists.
 * @param origin {@link FileStatus} of the original file.
 * @param datasetRoot Value of {@link CopyableDataset#datasetRoot} of the dataset creating this {@link CopyableFile}.
 * @param copyConfiguration {@link CopyConfiguration} for the copy job.
 * @return a {@link CopyableFile.Builder}.
 * @deprecated use {@link #fromOriginAndDestination}. This method was changed to remove reliance on dataset root
 *             which is not standard of all datasets. The old functionality on inferring destinations cannot be
 *             achieved without dataset root and common dataset root, so this is an approximation. Copyable datasets
 *             should compute file destinations themselves.
 */
@Deprecated
public static Builder builder(FileSystem originFs, FileStatus origin, Path datasetRoot, CopyConfiguration copyConfiguration) {
    Path relativePath = PathUtils.relativizePath(origin.getPath(), datasetRoot);
    Path targetRoot = new Path(copyConfiguration.getPublishDir(), datasetRoot.getName());
    Path targetPath = new Path(targetRoot, relativePath);
    return _hiddenBuilder().originFS(originFs).origin(origin).destination(targetPath)
        .preserve(copyConfiguration.getPreserve()).configuration(copyConfiguration);
}
87. HiveDRTool#cleanTempFiles()
private void cleanTempFiles() {
    Path eventsDirPath = new Path(FileUtils.DEFAULT_EVENT_STORE_PATH, inputOptions.getJobName());
    Path metaFilePath = new Path(eventsDirPath.toString(), inputOptions.getJobName() + META_PATH_FILE_SUFFIX);
    Path eventsFilePath = new Path(eventsDirPath.toString(), inputOptions.getJobName() + ".id");
    try {
        if (jobFS.exists(metaFilePath)) {
            jobFS.delete(metaFilePath, true);
        }
        if (jobFS.exists(eventsFilePath)) {
            jobFS.delete(eventsFilePath, true);
        }
    } catch (IOException e) {
        LOG.error("Deleting Temp files failed", e);
    }
}
88. FileSystemStorage#fileSystemEvictor()
private void fileSystemEvictor(String feedPath, String retentionLimit, TimeZone timeZone, Path logFilePath)
        throws IOException, ELException, FalconException {
    Path normalizedPath = new Path(feedPath);
    FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(normalizedPath.toUri());
    feedPath = normalizedPath.toUri().getPath();
    LOG.info("Normalized path: {}", feedPath);
    Pair<Date, Date> range = EvictionHelper.getDateRange(retentionLimit);
    List<Path> toBeDeleted = discoverInstanceToDelete(feedPath, timeZone, range.first, fs);
    if (toBeDeleted.isEmpty()) {
        LOG.info("No instances to delete.");
        return;
    }
    DateFormat dateFormat = new SimpleDateFormat(FeedHelper.FORMAT);
    dateFormat.setTimeZone(timeZone);
    Path feedBasePath = fs.makeQualified(FeedHelper.getFeedBasePath(feedPath));
    for (Path path : toBeDeleted) {
        deleteInstance(fs, path, feedBasePath);
        Date date = FeedHelper.getDate(feedPath, new Path(path.toUri().getPath()), timeZone);
        instanceDates.append(dateFormat.format(date)).append(',');
        instancePaths.append(path).append(EvictedInstanceSerDe.INSTANCEPATH_SEPARATOR);
    }
}
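Note on example 88: new Path(feedPath).toUri().getPath() is what strips the scheme and authority from a fully qualified location, so the retention logic afterwards works purely on the path component. A small illustration of that normalization step, using a hypothetical URI:

import org.apache.hadoop.fs.Path;

public class NormalizePathSketch {
    public static void main(String[] args) {
        // Hypothetical fully qualified feed location.
        Path feed = new Path("hdfs://namenode:8020/falcon/feeds/clicks/2012/04/20");
        // toUri().getPath() keeps only the path component, dropping scheme and authority.
        System.out.println(feed.toUri().getPath()); // /falcon/feeds/clicks/2012/04/20
    }
}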
89. FTPFileSystem#rename()
/**
 * Convenience method, so that we don't open a new connection when using this
 * method from within another method. Otherwise every API invocation incurs
 * the overhead of opening/closing a TCP connection.
 *
 * @param client
 * @param src
 * @param dst
 * @return
 * @throws IOException
 */
private boolean rename(FTPClient client, Path src, Path dst) throws IOException {
    Path workDir = new Path(client.printWorkingDirectory());
    Path absoluteSrc = makeAbsolute(workDir, src);
    Path absoluteDst = makeAbsolute(workDir, dst);
    if (!exists(client, absoluteSrc)) {
        throw new IOException("Source path " + src + " does not exist");
    }
    if (exists(client, absoluteDst)) {
        throw new IOException("Destination path " + dst + " already exist, cannot rename!");
    }
    String parentSrc = absoluteSrc.getParent().toUri().toString();
    String parentDst = absoluteDst.getParent().toUri().toString();
    String from = src.getName();
    String to = dst.getName();
    if (!parentSrc.equals(parentDst)) {
        throw new IOException("Cannot rename parent(source): " + parentSrc + ", parent(destination): " + parentDst);
    }
    client.changeWorkingDirectory(parentSrc);
    boolean renamed = client.rename(from, to);
    return renamed;
}
90. FileSystemContractBaseTest#testWorkingDirectory()
public void testWorkingDirectory() throws Exception {
    Path workDir = path(getDefaultWorkingDirectory());
    assertEquals(workDir, fs.getWorkingDirectory());
    fs.setWorkingDirectory(path("."));
    assertEquals(workDir, fs.getWorkingDirectory());
    fs.setWorkingDirectory(path(".."));
    assertEquals(workDir.getParent(), fs.getWorkingDirectory());
    Path relativeDir = path("hadoop");
    fs.setWorkingDirectory(relativeDir);
    assertEquals(relativeDir, fs.getWorkingDirectory());
    Path absoluteDir = path("/test/hadoop");
    fs.setWorkingDirectory(absoluteDir);
    assertEquals(absoluteDir, fs.getWorkingDirectory());
}
91. FileSystemContractBaseTest#testMkdirs()
public void testMkdirs() throws Exception {
    Path testDir = path("/test/hadoop");
    assertFalse(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));
    assertTrue(fs.mkdirs(testDir));
    assertTrue(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));
    assertTrue(fs.mkdirs(testDir));
    assertTrue(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));
    Path parentDir = testDir.getParent();
    assertTrue(fs.exists(parentDir));
    assertFalse(fs.isFile(parentDir));
    Path grandparentDir = parentDir.getParent();
    assertTrue(fs.exists(grandparentDir));
    assertFalse(fs.isFile(grandparentDir));
}
92. TestStickyBit#confirmCanAppend()
/**
 * Ensure that even if a file is in a directory with the sticky bit on,
 * another user can write to that file (assuming correct permissions).
 */
private void confirmCanAppend(Configuration conf, FileSystem hdfs, Path baseDir) throws IOException {
    // Create a tmp directory with wide-open permissions and sticky bit
    Path p = new Path(baseDir, "tmp");
    hdfs.mkdirs(p);
    hdfs.setPermission(p, new FsPermission((short) 01777));
    // Write a file to the new tmp directory as a regular user
    hdfs = logonAs(user1, conf, hdfs);
    Path file = new Path(p, "foo");
    writeFile(hdfs, file);
    hdfs.setPermission(file, new FsPermission((short) 0777));
    // Log onto cluster as another user and attempt to append to file
    hdfs = logonAs(user2, conf, hdfs);
    Path file2 = new Path(p, "foo");
    FSDataOutputStream h = hdfs.append(file2);
    h.write("Some more data".getBytes());
    h.close();
}
93. TestDFSShell#createTree()
static String createTree(FileSystem fs, String name) throws IOException {
    // create a tree
    //   ROOT
    //   |- f1
    //   |- f2
    //   + sub
    //      |- f3
    //      |- f4
    //   ROOT2
    //   |- f1
    String path = "/test/" + name;
    Path root = mkdir(fs, new Path(path));
    Path sub = mkdir(fs, new Path(root, "sub"));
    Path root2 = mkdir(fs, new Path(path + "2"));
    writeFile(fs, new Path(root, "f1"));
    writeFile(fs, new Path(root, "f2"));
    writeFile(fs, new Path(sub, "f3"));
    writeFile(fs, new Path(sub, "f4"));
    writeFile(fs, new Path(root2, "f1"));
    mkdir(fs, new Path(root2, "sub"));
    return path;
}
97. TestTezCommonUtils#testTezAttemptRecoveryStagingPath()
// Testing app attempt specific recovery path staging dir
@Test(timeout = 5000)
public void testTezAttemptRecoveryStagingPath() throws Exception {
    String strAppId = "testAppId";
    Path stageDir = TezCommonUtils.getTezSystemStagingPath(conf, strAppId);
    Path recoveryPath = TezCommonUtils.getRecoveryPath(stageDir, conf);
    Path recoveryStageDir = TezCommonUtils.getAttemptRecoveryPath(recoveryPath, 2);
    String expectedDir = RESOLVED_STAGE_DIR + Path.SEPARATOR + TezCommonUtils.TEZ_SYSTEM_SUB_DIR + Path.SEPARATOR
        + strAppId + Path.SEPARATOR + TezConstants.DAG_RECOVERY_DATA_DIR_NAME + Path.SEPARATOR + "2";
    Assert.assertEquals(recoveryStageDir.toString(), expectedDir);
}
98. TestSnapshotDescriptionUtils#testCompleteSnapshotWithNoSnapshotDirectoryFailure()
/**
 * Test that we throw an exception if there is no working snapshot directory when we attempt to
 * 'complete' the snapshot
 * @throws Exception on failure
 */
@Test
public void testCompleteSnapshotWithNoSnapshotDirectoryFailure() throws Exception {
    Path snapshotDir = new Path(root, HConstants.SNAPSHOT_DIR_NAME);
    Path tmpDir = new Path(snapshotDir, ".tmp");
    Path workingDir = new Path(tmpDir, "not_a_snapshot");
    assertFalse("Already have working snapshot dir: " + workingDir + " but shouldn't. Test file leak?",
        fs.exists(workingDir));
    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
    try {
        SnapshotDescriptionUtils.completeSnapshot(snapshot, root, workingDir, fs);
        fail("Shouldn't successfully complete move of a non-existent directory.");
    } catch (IOException e) {
        LOG.info("Correctly failed to move non-existant directory: " + e.getMessage());
    }
}
99. TestCopyRecoveredEditsTask#testNoEditsDir()
/**
 * Check that we don't get an exception if there is no recovered edits directory to copy
 * @throws Exception on failure
 */
@Test
public void testNoEditsDir() throws Exception {
    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
    ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
    FileSystem fs = UTIL.getTestFileSystem();
    Path root = UTIL.getDataTestDir();
    String regionName = "regionA";
    Path regionDir = new Path(root, regionName);
    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root);
    try {
        // doesn't really matter where the region's snapshot directory is, but this is pretty close
        Path snapshotRegionDir = new Path(workingDir, regionName);
        fs.mkdirs(snapshotRegionDir);
        Path regionEdits = HLog.getRegionDirRecoveredEditsDir(regionDir);
        assertFalse("Edits dir exists already - it shouldn't", fs.exists(regionEdits));
        CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir);
        task.call();
    } finally {
        // cleanup the working directory
        FSUtils.delete(fs, regionDir, true);
        FSUtils.delete(fs, workingDir, true);
    }
}
100. OfflineMetaRebuildTestCore#createRegion()
protected HRegionInfo createRegion(Configuration conf, final HTable htbl, byte[] startKey, byte[] endKey)
        throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = htbl.getTableDescriptor();
    HRegionInfo hri = new HRegionInfo(htbl.getTableName(), startKey, endKey);
    LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName());
    fs.mkdirs(p);
    Path riPath = new Path(p, HRegion.REGIONINFO_FILE);
    FSDataOutputStream out = fs.create(riPath);
    hri.write(out);
    out.close();
    // add to meta.
    Put put = new Put(hri.getRegionName());
    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(hri));
    meta.put(put);
    meta.flushCommits();
    return hri;
}