Here are examples of the Java API class org.apache.hadoop.fs.FileSystem, taken from open-source projects.
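Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: resolve the FileSystem that owns a Path, check existence, clean up, create directories, and write/read a file. The class name, the /tmp/fs-api-demo path, and the file contents are illustrative only and do not come from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Resolve the FileSystem that owns this path (local FS, HDFS, ...).
    Path dir = new Path("/tmp/fs-api-demo"); // illustrative path
    FileSystem fs = dir.getFileSystem(conf);
    // Clean up any previous run, then recreate the directory.
    if (fs.exists(dir)) {
      fs.delete(dir, true);
    }
    fs.mkdirs(dir);
    // Write a small file and read it back.
    Path file = new Path(dir, "hello.txt");
    try (FSDataOutputStream out = fs.create(file)) {
      out.writeUTF("hello");
    }
    try (FSDataInputStream in = fs.open(file)) {
      System.out.println(in.readUTF());
    }
  }
}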
1. HadoopUtilsTest#testRenameRecursively()
@Test
public void testRenameRecursively() throws Exception {
  FileSystem fs = FileSystem.getLocal(new Configuration());
  fs.mkdirs(new Path(hadoopUtilsTestDir, "testRename/a/b/c"));
  fs.mkdirs(new Path(hadoopUtilsTestDir, "testRenameStaging/a/b/c"));
  fs.mkdirs(new Path(hadoopUtilsTestDir, "testRenameStaging/a/b/c/e"));
  fs.create(new Path(hadoopUtilsTestDir, "testRenameStaging/a/b/c/t1.txt"));
  fs.create(new Path(hadoopUtilsTestDir, "testRenameStaging/a/b/c/e/t2.txt"));
  HadoopUtils.renameRecursively(fs, new Path(hadoopUtilsTestDir, "testRenameStaging"), new Path(hadoopUtilsTestDir, "testRename"));
  Assert.assertTrue(fs.exists(new Path(hadoopUtilsTestDir, "testRename/a/b/c/t1.txt")));
  Assert.assertTrue(fs.exists(new Path(hadoopUtilsTestDir, "testRename/a/b/c/e/t2.txt")));
}
2. LinkDb#install()
public static void install(JobConf job, Path linkDb) throws IOException {
  Path newLinkDb = FileOutputFormat.getOutputPath(job);
  FileSystem fs = new JobClient(job).getFs();
  Path old = new Path(linkDb, "old");
  Path current = new Path(linkDb, CURRENT_NAME);
  if (fs.exists(current)) {
    if (fs.exists(old))
      fs.delete(old, true);
    fs.rename(current, old);
  }
  fs.mkdirs(linkDb);
  fs.rename(newLinkDb, current);
  if (fs.exists(old))
    fs.delete(old, true);
  LockUtil.removeLockFile(fs, new Path(linkDb, LOCK_NAME));
}
3. NutchData#generate()
/*
private void test2LevelMapFile(Path furl) throws IOException {
  JobConf job = new JobConf();
  FileSystem fs = FileSystem.get(job);
  MapFile.Reader reader = new MapFile.Reader(fs, furl.toString(), job);
  Text value = new Text();
  reader.get(new LongWritable(1000), value);
  if (null != value) {
    log.info("---Find it: <1000, " + value + ">");
  }
}
*/
public void generate() throws Exception {
  init();
  createNutchUrls();
  createNutchIndexData();
  Path ffetch = new Path(options.getResultPath(), CrawlDatum.FETCH_DIR_NAME);
  Path fparse = new Path(options.getResultPath(), CrawlDatum.PARSE_DIR_NAME);
  Path linkdb = new Path(segment, LINKDB_DIR_NAME);
  FileSystem fs = ffetch.getFileSystem(new Configuration());
  fs.rename(ffetch, new Path(segment, CrawlDatum.FETCH_DIR_NAME));
  fs.rename(fparse, new Path(segment, CrawlDatum.PARSE_DIR_NAME));
  fs.rename(linkdb, new Path(options.getResultPath(), LINKDB_DIR_NAME));
  fs.close();
  close();
}
4. MapReduceTestUtil#createJob()
public static Job createJob(Configuration conf, Path inDir, Path outDir, int numInputFiles, int numReds, String input) throws IOException {
  Job job = new Job(conf);
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (fs.exists(inDir)) {
    fs.delete(inDir, true);
  }
  fs.mkdirs(inDir);
  for (int i = 0; i < numInputFiles; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }
  FileInputFormat.setInputPaths(job, inDir);
  FileOutputFormat.setOutputPath(job, outDir);
  job.setNumReduceTasks(numReds);
  return job;
}
5. PailTap#commitResource()
@Override
public boolean commitResource(JobConf conf) throws IOException {
  Pail p = Pail.create(_pailRoot, ((PailScheme) getScheme()).getSpec(), false);
  FileSystem fs = p.getFileSystem();
  Path tmpPath = new Path(_pailRoot, "_temporary");
  if (fs.exists(tmpPath)) {
    LOG.info("Deleting _temporary directory left by Hadoop job: " + tmpPath.toString());
    fs.delete(tmpPath, true);
  }
  Path tmp2Path = new Path(_pailRoot, "_temporary2");
  if (fs.exists(tmp2Path)) {
    LOG.info("Deleting _temporary2 directory: " + tmp2Path.toString());
    fs.delete(tmp2Path, true);
  }
  Path logPath = new Path(_pailRoot, "_logs");
  if (fs.exists(logPath)) {
    LOG.info("Deleting _logs directory left by Hadoop job: " + logPath.toString());
    fs.delete(logPath, true);
  }
  return true;
}
6. MapReduceTestUtil#createJob()
public static Job createJob(Configuration conf, Path inDir, Path outDir, int numInputFiles, int numReds, String input) throws IOException {
  Job job = new Job(conf);
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (fs.exists(inDir)) {
    fs.delete(inDir, true);
  }
  fs.mkdirs(inDir);
  for (int i = 0; i < numInputFiles; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }
  FileInputFormat.setInputPaths(job, inDir);
  FileOutputFormat.setOutputPath(job, outDir);
  job.setNumReduceTasks(numReds);
  return job;
}
7. TestMiniMRChildTask#launchTest()
/**
 * Launch tests
 * @param conf Configuration of the mapreduce job.
 * @param inDir input path
 * @param outDir output path
 * @param input Input text
 * @throws IOException
 */
public void launchTest(JobConf conf, Path inDir, Path outDir, String input) throws IOException {
  configure(conf, inDir, outDir, input, MapClass.class, IdentityReducer.class);
  FileSystem outFs = outDir.getFileSystem(conf);
  // Launch job with default option for temp dir.
  // i.e. temp dir is ./tmp
  JobClient.runJob(conf);
  outFs.delete(outDir, true);
  // Launch job by giving relative path to temp dir.
  conf.set("mapred.child.tmp", "../temp");
  JobClient.runJob(conf);
  outFs.delete(outDir, true);
  // Launch job by giving absolute path to temp dir
  conf.set("mapred.child.tmp", "/tmp");
  JobClient.runJob(conf);
  outFs.delete(outDir, true);
}
8. LinkDbMerger#merge()
public void merge(Path output, Path[] dbs, boolean normalize, boolean filter) throws Exception {
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
  long start = System.currentTimeMillis();
  LOG.info("LinkDb merge: starting at " + sdf.format(start));
  JobConf job = createMergeJob(getConf(), output, normalize, filter);
  for (int i = 0; i < dbs.length; i++) {
    FileInputFormat.addInputPath(job, new Path(dbs[i], LinkDb.CURRENT_NAME));
  }
  JobClient.runJob(job);
  FileSystem fs = FileSystem.get(getConf());
  fs.mkdirs(output);
  fs.rename(FileOutputFormat.getOutputPath(job), new Path(output, LinkDb.CURRENT_NAME));
  long end = System.currentTimeMillis();
  LOG.info("LinkDb merge: finished at " + sdf.format(end) + ", elapsed: " + TimingUtil.elapsedTime(start, end));
}
9. CrawlDbMerger#merge()
public void merge(Path output, Path[] dbs, boolean normalize, boolean filter) throws Exception {
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
  long start = System.currentTimeMillis();
  LOG.info("CrawlDb merge: starting at " + sdf.format(start));
  JobConf job = createMergeJob(getConf(), output, normalize, filter);
  for (int i = 0; i < dbs.length; i++) {
    if (LOG.isInfoEnabled()) {
      LOG.info("Adding " + dbs[i]);
    }
    FileInputFormat.addInputPath(job, new Path(dbs[i], CrawlDb.CURRENT_NAME));
  }
  JobClient.runJob(job);
  FileSystem fs = FileSystem.get(getConf());
  fs.mkdirs(output);
  fs.rename(FileOutputFormat.getOutputPath(job), new Path(output, CrawlDb.CURRENT_NAME));
  long end = System.currentTimeMillis();
  LOG.info("CrawlDb merge: finished at " + sdf.format(end) + ", elapsed: " + TimingUtil.elapsedTime(start, end));
}
10. TestJobStatusPersistency#testLocalPersistency()
/**
 * Test if the completed job status is persisted to localfs.
 */
public void testLocalPersistency() throws Exception {
  FileSystem fs = FileSystem.getLocal(createJobConf());
  fs.delete(TEST_DIR, true);
  Properties config = new Properties();
  config.setProperty("mapred.job.tracker.persist.jobstatus.active", "true");
  config.setProperty("mapred.job.tracker.persist.jobstatus.hours", "1");
  config.setProperty("mapred.job.tracker.persist.jobstatus.dir", fs.makeQualified(TEST_DIR).toString());
  stopCluster();
  startCluster(false, config);
  JobID jobId = runJob();
  JobClient jc = new JobClient(createJobConf());
  RunningJob rj = jc.getJob(jobId);
  assertNotNull(rj);
  // check if the local fs has the data
  Path jobInfo = new Path(TEST_DIR, rj.getID() + ".info");
  assertTrue("Missing job info from the local fs", fs.exists(jobInfo));
  fs.delete(TEST_DIR, true);
}
11. NNBench#cleanupBeforeTestrun()
// private static Configuration config = new Configuration();
/**
 * Clean up the files before a test run
 *
 * @throws IOException on error
 */
private static void cleanupBeforeTestrun(Configuration config) throws IOException {
  FileSystem tempFS = FileSystem.get(config);
  // Delete the data directory only if it is the create/write operation
  if (operation.equals(OP_CREATE_WRITE)) {
    LOG.info("Deleting data directory");
    tempFS.delete(new Path(baseDir, DATA_DIR_NAME), true);
  }
  tempFS.delete(new Path(baseDir, CONTROL_DIR_NAME), true);
  tempFS.delete(new Path(baseDir, OUTPUT_DIR_NAME), true);
}
12. HdfsProducerConsumerIntegrationTest#tearDown()
@Override
@After
public void tearDown() throws Exception {
  super.tearDown();
  Thread.sleep(250);
  Configuration conf = new Configuration();
  Path dir = new Path("hdfs://localhost:9000/tmp/test");
  FileSystem fs = FileSystem.get(dir.toUri(), conf);
  fs.delete(dir, true);
  fs.delete(new Path("hdfs://localhost:9000/tmp/test/multiple-consumers"), true);
}
13. HdfsAppendTest#tearDown()
@Override
public void tearDown() throws Exception {
  super.tearDown();
  Thread.sleep(250);
  Configuration conf = new Configuration();
  Path dir = new Path("hdfs://localhost:9000/tmp/test");
  FileSystem fs = FileSystem.get(dir.toUri(), conf);
  fs.delete(dir, true);
  dir = new Path("hdfs://localhost:9000/tmp/test-dynamic");
  fs.delete(dir, true);
}
14. TestDFSWrite#testDirectWrite()
@Test
public void testDirectWrite() throws IOException {
  FlumeConfiguration conf = FlumeConfiguration.get();
  Path path = new Path("file:///tmp/testfile");
  FileSystem hdfs = path.getFileSystem(conf);
  hdfs.deleteOnExit(path);
  String STRING = "Hello World";
  // writing
  FSDataOutputStream dos = hdfs.create(path);
  dos.writeUTF(STRING);
  dos.close();
  // reading
  FSDataInputStream dis = hdfs.open(path);
  String s = dis.readUTF();
  System.out.println(s);
  assertEquals(STRING, s);
  dis.close();
  hdfs.close();
}
15. Util#createInputFile()
public static OutputStream createInputFile(MiniGenericCluster cluster, String fileName) throws IOException {
  FileSystem fs = cluster.getFileSystem();
  if (Util.WINDOWS) {
    fileName = fileName.replace('\\', '/');
  }
  if (fs.exists(new Path(fileName))) {
    throw new IOException("File " + fileName + " already exists on the minicluster");
  }
  return fs.create(new Path(fileName));
}
16. TestMultiStorage#setUp()
@Override
@Before
public void setUp() throws Exception {
  createFile();
  FileSystem fs = FileSystem.getLocal(new Configuration());
  Path localOut = new Path("local-out");
  Path dummy = new Path("dummy");
  if (fs.exists(localOut)) {
    fs.delete(localOut, true);
  }
  if (fs.exists(dummy)) {
    fs.delete(dummy, true);
  }
}
17. DeprecatedOutputFormatTest#runMapReduceJob()
private void runMapReduceJob(CompressionCodecName codec) throws IOException, ClassNotFoundException, InterruptedException {
  final FileSystem fileSystem = parquetPath.getFileSystem(conf);
  fileSystem.delete(parquetPath, true);
  fileSystem.delete(outputPath, true);
  {
    jobConf.setInputFormat(TextInputFormat.class);
    TextInputFormat.addInputPath(jobConf, inputPath);
    jobConf.setNumReduceTasks(0);
    jobConf.setOutputFormat(DeprecatedParquetOutputFormat.class);
    DeprecatedParquetOutputFormat.setCompression(jobConf, codec);
    DeprecatedParquetOutputFormat.setOutputPath(jobConf, parquetPath);
    DeprecatedParquetOutputFormat.setWriteSupportClass(jobConf, GroupWriteSupport.class);
    GroupWriteSupport.setSchema(MessageTypeParser.parseMessageType(writeSchema), jobConf);
    jobConf.setMapperClass(DeprecatedMapper.class);
    mapRedJob = JobClient.runJob(jobConf);
  }
}
18. TestSpecificInputOutputFormat#createParquetFile()
@Before
public void createParquetFile() throws Exception {
  final FileSystem fileSystem = parquetPath.getFileSystem(conf);
  fileSystem.delete(parquetPath, true);
  fileSystem.delete(outputPath, true);
  {
    final Job job = new Job(conf, "write");
    // input not really used
    TextInputFormat.addInputPath(job, inputPath);
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapperClass(TestSpecificInputOutputFormat.MyMapper.class);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(AvroParquetOutputFormat.class);
    AvroParquetOutputFormat.setOutputPath(job, parquetPath);
    AvroParquetOutputFormat.setSchema(job, Car.SCHEMA$);
    waitForJob(job);
  }
}
19. TezClientUtils#getLRFileStatus()
private static FileStatus[] getLRFileStatus(String fileName, Configuration conf) throws IOException {
  URI uri;
  try {
    uri = new URI(fileName);
  } catch (URISyntaxException e) {
    String message = "Invalid URI defined in configuration for" + " location of TEZ jars. providedURI=" + fileName;
    LOG.error(message);
    throw new TezUncheckedException(message, e);
  }
  Path p = new Path(uri);
  FileSystem fs = p.getFileSystem(conf);
  p = fs.resolvePath(p.makeQualified(fs.getUri(), fs.getWorkingDirectory()));
  FileSystem targetFS = p.getFileSystem(conf);
  if (targetFS.isDirectory(p)) {
    return targetFS.listStatus(p);
  } else {
    FileStatus fStatus = targetFS.getFileStatus(p);
    return new FileStatus[] { fStatus };
  }
}
20. TestLargeObjectLoader#setUp()
public void setUp() throws IOException, InterruptedException {
  conf = new Configuration();
  if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
    conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
  }
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  this.outDir = new Path(System.getProperty("java.io.tmpdir"));
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  fs.mkdirs(outDir);
  loader = new LargeObjectLoader(conf, outDir);
}
21. TestFSBase#createPath()
protected void createPath(Path relativePath) throws Exception {
  Path fullPath = getFullPathWithSchemeAndAuthority(relativePath);
  FileSystem adminFS = storageFileSystem;
  LOGGER.info("Creating path " + fullPath);
  if (storageDFSType.equals(DFSType.ClusterDFS)) {
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(dfsAdmin, KEYTAB_LOCATION + "/" + dfsAdmin + ".keytab");
    adminFS = getFS(ugi);
  }
  if (adminFS.exists(fullPath)) {
    adminFS.delete(fullPath, true);
  }
  adminFS.mkdirs(fullPath);
}
22. SortBasedColPartitionStoreExec#getAppender()
private Appender getAppender(String partition) throws IOException {
  Path dataFile = getDataFile(partition);
  FileSystem fs = dataFile.getFileSystem(context.getConf());
  if (fs.exists(dataFile.getParent())) {
    LOG.info("Path " + dataFile.getParent() + " already exists!");
  } else {
    fs.mkdirs(dataFile.getParent());
    LOG.info("Add subpartition path directory :" + dataFile.getParent());
  }
  if (fs.exists(dataFile)) {
    LOG.info("File " + dataFile + " already exists!");
    FileStatus status = fs.getFileStatus(dataFile);
    LOG.info("File size: " + status.getLen());
  }
  appender = StorageManagerFactory.getStorageManager(context.getConf()).getAppender(meta, outSchema, dataFile);
  appender.enableStats();
  appender.init();
  return appender;
}
23. TestJobHistory#cleanupLocalFiles()
private void cleanupLocalFiles(MiniMRCluster mr) throws IOException {
  Configuration conf = mr.createJobConf();
  JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
  Path sysDir = new Path(jt.getSystemDir());
  FileSystem fs = sysDir.getFileSystem(conf);
  fs.delete(sysDir, true);
  Path jobHistoryDir = mr.getJobTrackerRunner().getJobTracker().getJobHistory().getJobHistoryLocation();
  fs = jobHistoryDir.getFileSystem(conf);
  fs.delete(jobHistoryDir, true);
}
24. TestHFileCleaner#testTTLCleaner()
@Test
public void testTTLCleaner() throws IOException, InterruptedException {
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  Path root = UTIL.getDataTestDir();
  Path file = new Path(root, "file");
  fs.createNewFile(file);
  long createTime = System.currentTimeMillis();
  assertTrue("Test file not created!", fs.exists(file));
  TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
  // update the time info for the file, so the cleaner removes it
  fs.setTimes(file, createTime - 100, -1);
  Configuration conf = UTIL.getConfiguration();
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
  cleaner.setConf(conf);
  assertTrue("File not set deletable - check mod time:" + getFileStats(file, fs) + " with create time:" + createTime, cleaner.isFileDeletable(file));
}
25. Utils#checkHdfsPath()
public static final void checkHdfsPath(Path path, boolean mkdir) throws IOException {
  FileSystem fs = path.getFileSystem(new Configuration());
  if (fs.exists(path)) {
    fs.delete(path, true);
  }
  if (mkdir) {
    fs.mkdirs(path);
  }
  fs.close();
}
26. TestSeveral#makeInput()
/**
 * Utility class to create input for the jobs
 * @param inDir
 * @param conf
 * @throws IOException
 */
private void makeInput(Path inDir, JobConf conf) throws IOException {
  FileSystem inFs = inDir.getFileSystem(conf);
  if (inFs.exists(inDir)) {
    inFs.delete(inDir, true);
  }
  inFs.mkdirs(inDir);
  Path inFile = new Path(inDir, "part-0");
  DataOutputStream file = inFs.create(inFile);
  for (int i = 0; i < numReduces; i++) {
    file.writeBytes("b a\n");
  }
  file.close();
}
27. TestJobStatusPersistency#testLocalPersistency()
/**
 * Test if the completed job status is persisted to localfs.
 */
public void testLocalPersistency() throws Exception {
  FileSystem fs = FileSystem.getLocal(createJobConf());
  fs.delete(TEST_DIR, true);
  Properties config = new Properties();
  config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS, "true");
  config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, "1");
  config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS_DIR, fs.makeQualified(TEST_DIR).toString());
  stopCluster();
  startCluster(false, config);
  JobID jobId = runJob();
  JobClient jc = new JobClient(createJobConf());
  RunningJob rj = jc.getJob(jobId);
  assertNotNull(rj);
  // check if the local fs has the data
  Path jobInfo = new Path(TEST_DIR, rj.getID() + ".info");
  assertTrue("Missing job info from the local fs", fs.exists(jobInfo));
  fs.delete(TEST_DIR, true);
}
28. TestHistoryParser#getDagInfoFromSimpleHistory()
private DagInfo getDagInfoFromSimpleHistory(String dagId) throws TezException, IOException {
  TezDAGID tezDAGID = TezDAGID.fromString(dagId);
  ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(tezDAGID.getApplicationId(), 1);
  Path historyPath = new Path(conf.get("fs.defaultFS") + SIMPLE_HISTORY_DIR + HISTORY_TXT + "." + applicationAttemptId);
  FileSystem fs = historyPath.getFileSystem(conf);
  Path localPath = new Path(DOWNLOAD_DIR, HISTORY_TXT);
  fs.copyToLocalFile(historyPath, localPath);
  File localFile = new File(DOWNLOAD_DIR, HISTORY_TXT);
  //Now parse via SimpleHistory
  SimpleHistoryParser parser = new SimpleHistoryParser(localFile);
  DagInfo dagInfo = parser.getDAGData(dagId);
  assertTrue(dagInfo.getDagId().equals(dagId));
  return dagInfo;
}
29. TestTezCommonUtils#testCreateTezSysStagingPath()
// Testing System staging dir if createed
@Test(timeout = 5000)
public void testCreateTezSysStagingPath() throws Exception {
  String strAppId = "testAppId";
  String expectedStageDir = RESOLVED_STAGE_DIR + Path.SEPARATOR + TezCommonUtils.TEZ_SYSTEM_SUB_DIR + Path.SEPARATOR + strAppId;
  String unResolvedStageDir = STAGE_DIR + Path.SEPARATOR + TezCommonUtils.TEZ_SYSTEM_SUB_DIR + Path.SEPARATOR + strAppId;
  Path stagePath = new Path(unResolvedStageDir);
  FileSystem fs = stagePath.getFileSystem(conf);
  if (fs.exists(stagePath)) {
    fs.delete(stagePath, true);
  }
  Assert.assertFalse(fs.exists(stagePath));
  Path stageDir = TezCommonUtils.createTezSystemStagingPath(conf, strAppId);
  Assert.assertEquals(stageDir.toString(), expectedStageDir);
  Assert.assertTrue(fs.exists(stagePath));
}
30. TestUtils#getStream()
public static FSDataInputStream getStream(ByteBuffer buf) throws IOException {
  File tmpDir = Files.createTempDir();
  Path filePath = new Path(tmpDir.getAbsolutePath() + "/testOut");
  FileSystem fs = FileSystem.get(filePath.toUri(), new Configuration());
  FSDataOutputStream fOut = fs.create(filePath);
  buf.rewind();
  while (buf.hasRemaining()) {
    fOut.writeByte(buf.get());
  }
  fOut.close();
  buf.rewind();
  return fs.open(filePath);
}
31. TestUtils#getStream()
public static FSDataInputStream getStream(IntBuffer buf) throws IOException {
  File tmpDir = Files.createTempDir();
  Path filePath = new Path(tmpDir.getAbsolutePath() + "/testOut");
  FileSystem fs = FileSystem.get(filePath.toUri(), new Configuration());
  FSDataOutputStream fOut = fs.create(filePath);
  buf.rewind();
  while (buf.hasRemaining()) {
    fOut.writeInt(buf.get());
  }
  fOut.close();
  buf.rewind();
  return fs.open(filePath);
}
32. TestUtils#getStream()
public static FSDataInputStream getStream(ShortBuffer buf) throws IOException {
  File tmpDir = Files.createTempDir();
  Path filePath = new Path(tmpDir.getAbsolutePath() + "/testOut");
  FileSystem fs = FileSystem.get(filePath.toUri(), new Configuration());
  FSDataOutputStream fOut = fs.create(filePath);
  buf.rewind();
  while (buf.hasRemaining()) {
    fOut.writeShort(buf.get());
  }
  fOut.close();
  buf.rewind();
  return fs.open(filePath);
}
33. TestUtils#getStream()
public static FSDataInputStream getStream(LongBuffer buf) throws IOException {
  File tmpDir = Files.createTempDir();
  Path filePath = new Path(tmpDir.getAbsolutePath() + "/testOut");
  FileSystem fs = FileSystem.get(filePath.toUri(), new Configuration());
  FSDataOutputStream fOut = fs.create(filePath);
  buf.rewind();
  while (buf.hasRemaining()) {
    fOut.writeLong(buf.get());
  }
  fOut.close();
  buf.rewind();
  return fs.open(filePath);
}
34. SQLServerDatatypeExportDelimitedFileManualTest#createFile()
public void createFile(DATATYPES dt, String[] data) throws IOException {
  Path tablePath = getTablePath(dt);
  Path filePath = new Path(tablePath, "part0000");
  Configuration conf = new Configuration();
  String hdfsroot;
  hdfsroot = System.getProperty("ms.datatype.test.hdfsprefix");
  if (hdfsroot == null) {
    hdfsroot = "hdfs://localhost/";
  }
  conf.set("fs.default.name", hdfsroot);
  FileSystem fs = FileSystem.get(conf);
  fs.mkdirs(tablePath);
  System.out.println("-----------------------------------Path : " + filePath);
  OutputStream os = fs.create(filePath);
  BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
  for (int i = 0; i < data.length; i++) {
    w.write(data[i] + "\n");
  }
  w.close();
  os.close();
}
35. NetezzaExportManualTest#createExportFile()
protected void createExportFile(ColumnGenerator... extraCols) throws IOException {
  String ext = ".txt";
  Path tablePath = getTablePath();
  Path filePath = new Path(tablePath, "part0" + ext);
  Configuration conf = new Configuration();
  if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
    conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
  }
  FileSystem fs = FileSystem.get(conf);
  fs.mkdirs(tablePath);
  OutputStream os = fs.create(filePath);
  BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
  for (int i = 0; i < 3; i++) {
    String line = getRecordLine(i, extraCols);
    w.write(line);
    LOG.debug("Create Export file - Writing line : " + line);
  }
  w.close();
  os.close();
}
36. TestSplittableBufferedWriter#ensureEmptyWriteDir()
/** Create the directory where we'll write our test files to; and
 * make sure it has no files in it. */
private void ensureEmptyWriteDir() throws IOException {
  FileSystem fs = FileSystem.getLocal(getConf());
  Path writeDir = getWritePath();
  fs.mkdirs(writeDir);
  FileStatus[] stats = fs.listStatus(writeDir);
  for (FileStatus stat : stats) {
    if (stat.isDir()) {
      fail("setUp(): Write directory " + writeDir + " contains subdirectories");
    }
    LOG.debug("setUp(): Removing " + stat.getPath());
    if (!fs.delete(stat.getPath(), false)) {
      fail("setUp(): Could not delete residual file " + stat.getPath());
    }
  }
  if (!fs.exists(writeDir)) {
    fail("setUp: Could not create " + writeDir);
  }
}
37. DirectImportUtils#createHdfsSink()
/**
 * Open a file in HDFS for write to hold the data associated with a table.
 * Creates any necessary directories, and returns the OutputStream to write
 * to. The caller is responsible for calling the close() method on the
 * returned stream.
 */
public static SplittableBufferedWriter createHdfsSink(Configuration conf, SqoopOptions options, ImportJobContext context) throws IOException {
  Path destDir = context.getDestination();
  FileSystem fs = destDir.getFileSystem(conf);
  LOG.debug("Writing to filesystem: " + fs.getUri());
  LOG.debug("Creating destination directory " + destDir);
  fs.mkdirs(destDir);
  // This Writer will be closed by the caller.
  return new SplittableBufferedWriter(new SplittingOutputStream(conf, destDir, "part-m-", options.getDirectSplitSize(), getCodec(conf, options)));
}
38. ImportTool#deleteTargetDir()
private void deleteTargetDir(ImportJobContext context) throws IOException {
  SqoopOptions options = context.getOptions();
  FileSystem fs = FileSystem.get(options.getConf());
  Path destDir = context.getDestination();
  if (fs.exists(destDir)) {
    fs.delete(destDir, true);
    LOG.info("Destination directory " + destDir + " deleted.");
    return;
  } else {
    LOG.info("Destination directory " + destDir + " is not present, " + "hence not deleting.");
  }
}
39. AbstractSolrSentryTestBase#setupSentry()
public static File setupSentry() throws Exception {
  File sentrySite = File.createTempFile("sentry-site", "xml");
  sentrySite.deleteOnExit();
  File authProviderDir = new File(RESOURCES_DIR, "sentry");
  String authProviderName = "test-authz-provider.ini";
  FileSystem clusterFs = dfsCluster.getFileSystem();
  clusterFs.copyFromLocalFile(false, new Path(authProviderDir.toString(), authProviderName), new Path(authProviderName));
  // need to write sentry-site at execution time because we don't know
  // the location of sentry.solr.provider.resource beforehand
  StringBuilder sentrySiteData = new StringBuilder();
  sentrySiteData.append("<configuration>\n");
  addPropertyToSentry(sentrySiteData, "sentry.provider", "org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider");
  addPropertyToSentry(sentrySiteData, "sentry.solr.provider.resource", clusterFs.getWorkingDirectory() + File.separator + authProviderName);
  sentrySiteData.append("</configuration>\n");
  FileUtils.writeStringToFile(sentrySite, sentrySiteData.toString());
  return sentrySite;
}
40. AlertJanitor#writeLogs()
/**
 * Writes out logs to the given path as a separate JSON message per line.
 *
 * @param queue
 * @param path
 * @throws IOException
 */
public void writeLogs(String queue, Path path) throws IOException {
  FileSystem fs = FileSystem.get(path.toUri(), conf);
  DataOutputStream fout = fs.create(path);
  do {
    List<Message> messages = pull(queue, batchCount);
    if (messages.isEmpty()) {
      break;
    }
    for (Message m : messages) {
      fout.write((m.getBody().replaceAll("[\n|\r]", " ") + "\n").getBytes("UTF8"));
    }
    delete(queue, messages);
  } while (true);
  fout.close();
  fs.close();
}
41. UnknownTextSchemaDescriptor#computeSchema()
void computeSchema() throws IOException {
  this.randId = new Random().nextInt();
  LearnStructure ls = new LearnStructure();
  FileSystem fs = FSAnalyzer.getInstance().getFS();
  FileSystem localFS = FileSystem.getLocal(new Configuration());
  Path inputPath = dd.getFilename();
  File workingParserFile = File.createTempFile("textdesc", "typetree", null);
  File workingSchemaFile = File.createTempFile("textdesc", "schema", null);
  ls.inferRecordFormat(fs, inputPath, localFS, new Path(workingSchemaFile.getCanonicalPath()), new Path(workingParserFile.getCanonicalPath()), null, null, false, MAX_LINES);
  this.schema = Schema.parse(workingSchemaFile);
  DataInputStream in = new DataInputStream(localFS.open(new Path(workingParserFile.getCanonicalPath())));
  try {
    this.typeTree = InferredType.readType(in);
  } catch (IOException iex) {
    iex.printStackTrace();
    throw iex;
  } finally {
    in.close();
  }
  //System.err.println("Recovered unknowntext schema: " + schema);
}
42. AbstractTestHiveClient#listAllDataFiles()
protected Set<String> listAllDataFiles(Path path) throws IOException {
  Set<String> result = new HashSet<>();
  FileSystem fileSystem = hdfsEnvironment.getFileSystem("user", path);
  if (fileSystem.exists(path)) {
    for (FileStatus fileStatus : fileSystem.listStatus(path)) {
      if (HadoopFileStatus.isFile(fileStatus)) {
        result.add(fileStatus.getPath().toString());
      } else if (HadoopFileStatus.isDirectory(fileStatus)) {
        result.addAll(listAllDataFiles(fileStatus.getPath()));
      }
    }
  }
  return result;
}
43. TestTezAutoParallelism#testGroupBy()
@Test
public void testGroupBy() throws IOException {
  // parallelism is 3 originally, reduce to 1
  pigServer.getPigContext().getProperties().setProperty(PigConfiguration.PIG_NO_SPLIT_COMBINATION, "true");
  pigServer.getPigContext().getProperties().setProperty(MRConfiguration.MAX_SPLIT_SIZE, "3000");
  pigServer.getPigContext().getProperties().setProperty(InputSizeReducerEstimator.BYTES_PER_REDUCER_PARAM, Long.toString(InputSizeReducerEstimator.DEFAULT_BYTES_PER_REDUCER));
  pigServer.registerQuery("A = load '" + INPUT_FILE1 + "' as (name:chararray, age:int);");
  pigServer.registerQuery("B = group A by name;");
  pigServer.store("B", "output1");
  FileSystem fs = cluster.getFileSystem();
  FileStatus[] files = fs.listStatus(new Path("output1"), new PathFilter() {
    @Override
    public boolean accept(Path path) {
      if (path.getName().startsWith("part")) {
        return true;
      }
      return false;
    }
  });
  assertEquals(files.length, 1);
  fs.delete(new Path("output1"), true);
}
44. HPath#copy()
public void copy(ElementDescriptor dstName, Properties dstConfiguration, boolean removeSrc) throws IOException {
  FileSystem srcFS = this.fs.getHFS();
  FileSystem dstFS = ((HPath) dstName).fs.getHFS();
  Path srcPath = this.path;
  Path dstPath = ((HPath) dstName).path;
  boolean result = FileUtil.copy(srcFS, srcPath, dstFS, dstPath, false, new Configuration());
  if (!result) {
    int errCode = 2097;
    String msg = "Failed to copy from: " + this.toString() + " to: " + dstName.toString();
    throw new ExecException(msg, errCode, PigException.BUG);
  }
}
45. TestThriftToParquetFileWriter#createFile()
private <T extends TBase<?, ?>> Path createFile(T... tObjs) throws IOException, InterruptedException, TException {
  final Path fileToCreate = new Path("target/test/TestThriftToParquetFileWriter/" + tObjs[0].getClass() + ".parquet");
  LOG.info("File created: " + fileToCreate.toString());
  Configuration conf = new Configuration();
  final FileSystem fs = fileToCreate.getFileSystem(conf);
  if (fs.exists(fileToCreate)) {
    fs.delete(fileToCreate, true);
  }
  TProtocolFactory protocolFactory = new TCompactProtocol.Factory();
  TaskAttemptID taskId = new TaskAttemptID("local", 0, true, 0, 0);
  ThriftToParquetFileWriter w = new ThriftToParquetFileWriter(fileToCreate, ContextUtil.newTaskAttemptContext(conf, taskId), protocolFactory, (Class<? extends TBase<?, ?>>) tObjs[0].getClass());
  for (T tObj : tObjs) {
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final TProtocol protocol = protocolFactory.getProtocol(new TIOStreamTransport(baos));
    tObj.write(protocol);
    w.write(new BytesWritable(baos.toByteArray()));
  }
  w.close();
  return fileToCreate;
}
46. ParquetScroogeSchemeTest#doRead()
private void doRead() throws Exception {
  Path path = new Path(txtOutputPath);
  final FileSystem fs = path.getFileSystem(new Configuration());
  if (fs.exists(path))
    fs.delete(path, true);
  Scheme sourceScheme = new ParquetScroogeScheme<Name>(Name.class);
  Tap source = new Hfs(sourceScheme, parquetOutputPath);
  Scheme sinkScheme = new TextLine(new Fields("first", "last"));
  Tap sink = new Hfs(sinkScheme, txtOutputPath);
  Pipe assembly = new Pipe("namecp");
  assembly = new Each(assembly, new UnpackThriftFunction());
  Flow flow = new HadoopFlowConnector().connect("namecp", source, sink, assembly);
  flow.complete();
  String result = FileUtils.readFileToString(new File(txtOutputPath + "/part-00000"));
  assertEquals("0\tAlice\tPractice\n15\tBob\tHope\n24\tCharlie\tHorse\n", result);
}
47. ParquetScroogeSchemeTest#doWrite()
private void doWrite() throws Exception {
  Path path = new Path(parquetOutputPath);
  final FileSystem fs = path.getFileSystem(new Configuration());
  if (fs.exists(path))
    fs.delete(path, true);
  Scheme sourceScheme = new TextLine(new Fields("first", "last"));
  Tap source = new Hfs(sourceScheme, txtInputPath);
  Scheme sinkScheme = new ParquetScroogeScheme<Name>(Name.class);
  Tap sink = new Hfs(sinkScheme, parquetOutputPath);
  Pipe assembly = new Pipe("namecp");
  assembly = new Each(assembly, new PackThriftFunction());
  Flow flow = new HadoopFlowConnector().connect("namecp", source, sink, assembly);
  flow.complete();
}
48. TestParquetTBaseScheme#doRead()
private void doRead(Scheme sourceScheme) throws Exception {
  createFileForRead();
  Path path = new Path(txtOutputPath);
  final FileSystem fs = path.getFileSystem(new Configuration());
  if (fs.exists(path))
    fs.delete(path, true);
  Tap source = new Hfs(sourceScheme, parquetInputPath);
  Scheme sinkScheme = new TextLine(new Fields("first", "last"));
  Tap sink = new Hfs(sinkScheme, txtOutputPath);
  Pipe assembly = new Pipe("namecp");
  assembly = new Each(assembly, new UnpackThriftFunction());
  Flow flow = new HadoopFlowConnector().connect("namecp", source, sink, assembly);
  flow.complete();
  String result = FileUtils.readFileToString(new File(txtOutputPath + "/part-00000"));
  assertEquals("Alice\tPractice\nBob\tHope\nCharlie\tHorse\n", result);
}
49. TestParquetTBaseScheme#testWrite()
@Test
public void testWrite() throws Exception {
  Path path = new Path(parquetOutputPath);
  JobConf jobConf = new JobConf();
  final FileSystem fs = path.getFileSystem(jobConf);
  if (fs.exists(path))
    fs.delete(path, true);
  Scheme sourceScheme = new TextLine(new Fields("first", "last"));
  Tap source = new Hfs(sourceScheme, txtInputPath);
  Scheme sinkScheme = new ParquetTBaseScheme(Name.class);
  Tap sink = new Hfs(sinkScheme, parquetOutputPath);
  Pipe assembly = new Pipe("namecp");
  assembly = new Each(assembly, new PackThriftFunction());
  HadoopFlowConnector hadoopFlowConnector = new HadoopFlowConnector();
  Flow flow = hadoopFlowConnector.connect("namecp", source, sink, assembly);
  flow.complete();
  assertTrue(fs.exists(new Path(parquetOutputPath)));
  assertTrue(fs.exists(new Path(parquetOutputPath + "/_metadata")));
  assertTrue(fs.exists(new Path(parquetOutputPath + "/_common_metadata")));
}
50. TestParquetTupleScheme#testReadWrite()
public void testReadWrite(String inputPath) throws Exception {
  createFileForRead();
  Path path = new Path(txtOutputPath);
  final FileSystem fs = path.getFileSystem(new Configuration());
  if (fs.exists(path))
    fs.delete(path, true);
  Scheme sourceScheme = new ParquetTupleScheme(new Fields("first_name", "last_name"));
  Tap source = new Hfs(sourceScheme, inputPath);
  Scheme sinkScheme = new TextLine(new Fields("first", "last"));
  Tap sink = new Hfs(sinkScheme, txtOutputPath);
  Pipe assembly = new Pipe("namecp");
  assembly = new Each(assembly, new UnpackTupleFunction());
  Flow flow = new HadoopFlowConnector().connect("namecp", source, sink, assembly);
  flow.complete();
  String result = FileUtils.readFileToString(new File(txtOutputPath + "/part-00000"));
  assertEquals("Alice\tPractice\nBob\tHope\nCharlie\tHorse\n", result);
}
51. TestParquetTupleScheme#testFieldProjection()
@Test
public void testFieldProjection() throws Exception {
  createFileForRead();
  Path path = new Path(txtOutputPath);
  final FileSystem fs = path.getFileSystem(new Configuration());
  if (fs.exists(path))
    fs.delete(path, true);
  Scheme sourceScheme = new ParquetTupleScheme(new Fields("last_name"));
  Tap source = new Hfs(sourceScheme, parquetInputPath);
  Scheme sinkScheme = new TextLine(new Fields("last_name"));
  Tap sink = new Hfs(sinkScheme, txtOutputPath);
  Pipe assembly = new Pipe("namecp");
  assembly = new Each(assembly, new ProjectedTupleFunction());
  Flow flow = new HadoopFlowConnector().connect("namecp", source, sink, assembly);
  flow.complete();
  String result = FileUtils.readFileToString(new File(txtOutputPath + "/part-00000"));
  assertEquals("Practice\nHope\nHorse\n", result);
}
52. TestParquetTBaseScheme#doRead()
private void doRead(Scheme sourceScheme) throws Exception {
  createFileForRead();
  Path path = new Path(txtOutputPath);
  final FileSystem fs = path.getFileSystem(new Configuration());
  if (fs.exists(path))
    fs.delete(path, true);
  Tap source = new Hfs(sourceScheme, parquetInputPath);
  Scheme sinkScheme = new TextLine(new Fields("first", "last"));
  Tap sink = new Hfs(sinkScheme, txtOutputPath);
  Pipe assembly = new Pipe("namecp");
  assembly = new Each(assembly, new UnpackThriftFunction());
  Flow flow = new HadoopFlowConnector().connect("namecp", source, sink, assembly);
  flow.complete();
  String result = FileUtils.readFileToString(new File(txtOutputPath + "/part-00000"));
  assertEquals("Alice\tPractice\nBob\tHope\nCharlie\tHorse\n", result);
}
53. TestParquetTBaseScheme#testWrite()
@Test
public void testWrite() throws Exception {
  Path path = new Path(parquetOutputPath);
  JobConf jobConf = new JobConf();
  final FileSystem fs = path.getFileSystem(jobConf);
  if (fs.exists(path))
    fs.delete(path, true);
  Scheme sourceScheme = new TextLine(new Fields("first", "last"));
  Tap source = new Hfs(sourceScheme, txtInputPath);
  Scheme sinkScheme = new ParquetTBaseScheme(Name.class);
  Tap sink = new Hfs(sinkScheme, parquetOutputPath);
  Pipe assembly = new Pipe("namecp");
  assembly = new Each(assembly, new PackThriftFunction());
  HadoopFlowConnector hadoopFlowConnector = new HadoopFlowConnector();
  Flow flow = hadoopFlowConnector.connect("namecp", source, sink, assembly);
  flow.complete();
  assertTrue(fs.exists(new Path(parquetOutputPath)));
  assertTrue(fs.exists(new Path(parquetOutputPath + "/_metadata")));
  assertTrue(fs.exists(new Path(parquetOutputPath + "/_common_metadata")));
}
54. TestSshActionExecutor#setUp()
@Override
protected void setUp() throws Exception {
  super.setUp();
  services = new Services();
  services.init();
  XConfiguration conf = new XConfiguration();
  conf.setStrings(WorkflowAppService.HADOOP_USER, getTestUser());
  conf.setStrings(WorkflowAppService.HADOOP_UGI, getTestUser() + "," + getTestGroup());
  Path path = new Path(getNameNodeUri(), getTestCaseDir());
  FileSystem fs = getFileSystem();
  fs.delete(path, true);
}
55. HdfsThreadLeakTest#testBasic()
@Test
public void testBasic() throws IOException {
  String uri = HdfsTestUtil.getURI(dfsCluster);
  Path path = new Path(uri);
  Configuration conf = new Configuration();
  conf.setBoolean("fs.hdfs.impl.disable.cache", true);
  FileSystem fs = FileSystem.get(path.toUri(), conf);
  Path testFile = new Path(uri.toString() + "/testfile");
  FSDataOutputStream out = fs.create(testFile);
  out.write(5);
  out.hflush();
  out.close();
  ((DistributedFileSystem) fs).recoverLease(testFile);
  fs.close();
}
56. SolrOutputFormat#addSolrConfToDistributedCache()
public static void addSolrConfToDistributedCache(Job job, File solrHomeZip) throws IOException {
  // Make a reasonably unique name for the zip file in the distributed cache
  // to avoid collisions if multiple jobs are running.
  String hdfsZipName = UUID.randomUUID().toString() + '.' + ZIP_FILE_BASE_NAME;
  Configuration jobConf = job.getConfiguration();
  jobConf.set(ZIP_NAME, hdfsZipName);
  Path zipPath = new Path("/tmp", getZipName(jobConf));
  FileSystem fs = FileSystem.get(jobConf);
  fs.copyFromLocalFile(new Path(solrHomeZip.toString()), zipPath);
  final URI baseZipUrl = fs.getUri().resolve(zipPath.toString() + '#' + getZipName(jobConf));
  DistributedCache.addCacheArchive(baseZipUrl, jobConf);
  LOG.debug("Set Solr distributed cache: {}", Arrays.asList(job.getCacheArchives()));
  LOG.debug("Set zipPath: {}", zipPath);
  // Actually send the path for the configuration zip file
  jobConf.set(SETUP_OK, zipPath.toString());
}
57. HadoopUtils#addAllSubPaths()
public static JobConf addAllSubPaths(JobConf conf, Path path) throws IOException {
  if (shouldPathBeIgnored(path)) {
    throw new IllegalArgumentException(String.format("Path[%s] should be ignored.", path));
  }
  final FileSystem fs = path.getFileSystem(conf);
  if (fs.exists(path)) {
    for (FileStatus status : fs.listStatus(path)) {
      if (!shouldPathBeIgnored(status.getPath())) {
        if (status.isDir()) {
          addAllSubPaths(conf, status.getPath());
        } else {
          FileInputFormat.addInputPath(conf, status.getPath());
        }
      }
    }
  }
  return conf;
}
58. DistCacheConfigurer#configure()
@Override
public void configure(Job job) throws IOException {
  Configuration conf = job.getConfiguration();
  FileSystem localFS = FileSystem.getLocal(conf);
  FileSystem jobFS = FileSystem.get(conf);
  for (Path p : getLocalPaths()) {
    Path stagedPath = uploadFileIfNecessary(localFS, p, jobFS);
    // Calling this method decompresses the archive and makes Hadoop
    // handle its classfiles individually. This leads to crippling
    // overhead times (10+ seconds) even with the LocalJobRunner
    // courtesy of o.a.h.yarn.util.FSDownload.changePermissions
    // copying and chmodding each classfile copy file individually.
    //job.addArchiveToClassPath(p);
    // Just add the compressed archive instead:
    job.addFileToClassPath(stagedPath);
  }
  // We don't really need to set a mapred job jar here,
  // but doing so suppresses a warning
  String mj = getMapredJar();
  if (null != mj)
    job.setJar(mj);
}
59. TestFsActionExecutor#testDelete()
public void testDelete() throws Exception {
  FsActionExecutor ae = new FsActionExecutor();
  FileSystem fs = getFileSystem();
  Path path = new Path(getFsTestCaseDir(), "dir1");
  Context context = createContext("<fs/>");
  fs.mkdirs(path);
  ae.delete(context, path);
  assertTrue(!fs.exists(path));
  ae.delete(context, path);
}
60. LauncherMapper#setupMainConfiguration()
private void setupMainConfiguration() throws IOException {
  FileSystem fs = FileSystem.get(getJobConf());
  fs.copyToLocalFile(new Path(getJobConf().get(OOZIE_ACTION_DIR_PATH), ACTION_CONF_XML), new Path(new File(ACTION_CONF_XML).getAbsolutePath()));
  System.setProperty("oozie.launcher.job.id", getJobConf().get("mapred.job.id"));
  System.setProperty("oozie.job.id", getJobConf().get(OOZIE_JOB_ID));
  System.setProperty("oozie.action.id", getJobConf().get(OOZIE_ACTION_ID));
  System.setProperty("oozie.action.conf.xml", new File(ACTION_CONF_XML).getAbsolutePath());
  System.setProperty("oozie.action.output.properties", new File(ACTION_OUTPUT_PROPS).getAbsolutePath());
  System.setProperty("oozie.action.newId.properties", new File(ACTION_NEW_ID_PROPS).getAbsolutePath());
}
61. LauncherMapper#getRecoveryId()
/**
 * @param launcherConf
 * @param actionDir
 * @param recoveryId
 * @return
 * @throws HadoopAccessorException
 * @throws IOException
 */
public static String getRecoveryId(Configuration launcherConf, Path actionDir, String recoveryId) throws HadoopAccessorException, IOException {
  String jobId = null;
  Path recoveryFile = new Path(actionDir, recoveryId);
  //FileSystem fs = FileSystem.get(launcherConf);
  FileSystem fs = Services.get().get(HadoopAccessorService.class).createFileSystem(launcherConf.get("user.name"), launcherConf.get("group.name"), launcherConf);
  if (fs.exists(recoveryFile)) {
    InputStream is = fs.open(recoveryFile);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    jobId = reader.readLine();
    reader.close();
  }
  return jobId;
}
62. TestUtils#composeOutputHdfs()
public static Path composeOutputHdfs(Class<?> c) throws IOException {
  String pn = packageName(c);
  String cn = c.getName().replace(pn + ".", "");
  Path result = new Path(Defs.OUTPUT_HDFS + "/" + pn + "/" + cn + "/");
  FileSystem fs = HadoopFileUtils.getFileSystem(result);
  if (fs.exists(result)) {
    fs.delete(result, true);
  }
  if (!fs.mkdirs(result)) {
    throw new IOException("Error creating test output HDFS directory (" + result + ")");
  }
  return result;
}
63. MapOpTestVectorUtils#generateBaselineVector()
public void generateBaselineVector(final Configuration conf, final String testName, final String ex) throws IOException, ParserException, JobFailedException, JobCancelledException {
  runMapAlgebraExpression(conf, testName, ex);
  final Path src = new Path(outputHdfs, testName);
  final FileSystem srcfs = src.getFileSystem(conf);
  if (srcfs.exists(src)) {
    final Path dst = new Path(inputLocal, testName);
    final FileSystem fs = dst.getFileSystem(conf);
    fs.copyToLocalFile(src, dst);
  }
}
64. HadoopFileUtils#copyFileToHdfs()
public static void copyFileToHdfs(final String fromFile, final String toFile, final boolean overwrite) throws IOException {
  final Path toPath = new Path(toFile);
  final Path fromPath = new Path(fromFile);
  final FileSystem srcFS = HadoopFileUtils.getFileSystem(toPath);
  final FileSystem dstFS = HadoopFileUtils.getFileSystem(fromPath);
  final Configuration conf = HadoopUtils.createConfiguration();
  InputStream in = null;
  OutputStream out = null;
  try {
    in = srcFS.open(fromPath);
    out = dstFS.create(toPath, overwrite);
    IOUtils.copyBytes(in, out, conf, true);
  } catch (final IOException e) {
    IOUtils.closeStream(out);
    IOUtils.closeStream(in);
    throw e;
  }
}
65. Util#findPartFiles()
public static List<Path> findPartFiles(JobConf conf, Path root) throws IOException {
  FileSystem fs = root.getFileSystem(new JobConf());
  List<Path> files = new ArrayList<Path>();
  for (FileStatus status : fs.listStatus(root)) {
    if (status.isDir()) {
      files.addAll(findPartFiles(conf, status.getPath()));
    } else {
      files.add(status.getPath());
    }
  }
  return files;
}
66. AvroHdfsFileReader#getPaths()
@Override
protected List<Path> getPaths(String filePath) throws IOException {
  Path path = new Path(filePath);
  FileSystem fs = path.getFileSystem(getConf());
  List<Path> paths = new ArrayList<Path>();
  for (FileStatus status : fs.listStatus(path)) {
    if (status.isDir() && !AvroUtils.shouldPathBeIgnored(status.getPath())) {
      paths.addAll(getPaths(status.getPath().toString()));
    } else if (isAvro(status.getPath())) {
      paths.add(status.getPath());
    }
  }
  return paths;
}
67. HadoopUtil#uploadJars()
public static void uploadJars(String sourcePath, String hdfsDestinationPath) throws IOException {
  Configuration conf = HadoopUtil.getHadoopConfiguration();
  FileSystem fs = FileSystem.get(conf);
  Path localFilePath = new Path(sourcePath);
  Path hdfsFilePath = new Path(hdfsDestinationPath);
  log.info("Copying " + sourcePath + " to " + hdfsDestinationPath);
  fs.copyFromLocalFile(localFilePath, hdfsFilePath);
  log.info("Copied Successfully " + sourcePath + " to " + hdfsDestinationPath);
}
68. CubeHFileJob#reconfigurePartitions()
/**
 * Check if there's partition files for hfile, if yes replace the table splits, to make the job more reducers
 * @param conf the job configuration
 * @param path the hfile partition file
 * @throws IOException
 */
@SuppressWarnings("deprecation")
private void reconfigurePartitions(Configuration conf, Path path) throws IOException {
  FileSystem fs = path.getFileSystem(conf);
  if (fs.exists(path)) {
    try (SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf)) {
      int partitionCount = 0;
      Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
      Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
      while (reader.next(key, value)) {
        partitionCount++;
      }
      TotalOrderPartitioner.setPartitionFile(job.getConfiguration(), path);
      // The reduce tasks should be one more than partition keys
      job.setNumReduceTasks(partitionCount + 1);
    }
  } else {
    logger.info("File '" + path.toString() + " doesn't exist, will not reconfigure hfile Partitions");
  }
}
69. HBaseResourceStore#writeLargeCellToHdfs()
private Path writeLargeCellToHdfs(String resPath, byte[] largeColumn, HTableInterface table) throws IOException {
  Path redirectPath = bigCellHDFSPath(resPath);
  Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();
  FileSystem fileSystem = FileSystem.get(hconf);
  if (fileSystem.exists(redirectPath)) {
    fileSystem.delete(redirectPath, true);
  }
  FSDataOutputStream out = fileSystem.create(redirectPath);
  try {
    out.write(largeColumn);
  } finally {
    IOUtils.closeQuietly(out);
  }
  return redirectPath;
}
70. AppendTrieDictionary#copyToAnotherMeta()
@Override
public AppendTrieDictionary copyToAnotherMeta(KylinConfig srcConfig, KylinConfig dstConfig) throws IOException {
  Configuration conf = new Configuration();
  AppendTrieDictionary newDict = new AppendTrieDictionary();
  newDict.update(baseDir.replaceFirst(srcConfig.getHdfsWorkingDirectory(), dstConfig.getHdfsWorkingDirectory()), baseId, maxId, maxValueLength, nValues, bytesConverter, writeDictMap());
  logger.info("Copy AppendDict from {} to {}", this.baseDir, newDict.baseDir);
  Path srcPath = new Path(this.baseDir);
  Path dstPath = new Path(newDict.baseDir);
  FileSystem dstFs = FileSystem.get(dstPath.toUri(), conf);
  if (dstFs.exists(dstPath)) {
    logger.info("Delete existing AppendDict {}", dstPath);
    dstFs.delete(dstPath, true);
  }
  FileUtil.copy(FileSystem.get(srcPath.toUri(), conf), srcPath, FileSystem.get(dstPath.toUri(), conf), dstPath, false, true, conf);
  return newDict;
}
71. HDFSFileFinder#getNumBytesOfGlobHeldByDatanodes()
public static Map<String, Long> getNumBytesOfGlobHeldByDatanodes(Path p, Configuration conf) throws IOException {
  FileSystem fs = p.getFileSystem(conf);
  HashMap<String, Long> bytesHeld = Maps.newHashMap();
  for (FileStatus f : fs.globStatus(p)) {
    BlockLocation[] bls = fs.getFileBlockLocations(p, 0, f.getLen());
    if (bls.length > 0) {
      for (BlockLocation bl : bls) {
        long l = bl.getLength();
        for (String name : bl.getNames()) {
          if (bytesHeld.containsKey(name))
            bytesHeld.put(name, bytesHeld.get(name) + l);
          else
            bytesHeld.put(name, l);
        }
      }
    }
  }
  return bytesHeld;
}
72. ResultMergeLocalFile#copyAllFiles()
/**
 *
 * @param fnameNew
 * @param inMO
 * @throws CacheException
 * @throws IOException
 */
private void copyAllFiles(String fnameNew, ArrayList<MatrixObject> inMO) throws CacheException, IOException {
  JobConf job = new JobConf(ConfigurationManager.getCachedJobConf());
  FileSystem fs = FileSystem.get(job);
  Path path = new Path(fnameNew);
  //create output dir
  fs.mkdirs(path);
  //merge in all input matrix objects
  IDSequence seq = new IDSequence();
  for (MatrixObject in : inMO) {
    LOG.trace("ResultMerge (local, file): Merge input " + in.getVarName() + " (fname=" + in.getFileName() + ") via file rename.");
    //copy over files (just rename file or entire dir)
    Path tmpPath = new Path(in.getFileName());
    String lname = tmpPath.getName();
    fs.rename(tmpPath, new Path(fnameNew + "/" + lname + seq.getNextID()));
  }
}
73. AbstractSolrSentryTestBase#setupSentry()
public static File setupSentry() throws Exception {
  File sentrySite = File.createTempFile("sentry-site", "xml");
  sentrySite.deleteOnExit();
  File authProviderDir = new File(RESOURCES_DIR, "sentry");
  String authProviderName = "test-authz-provider.ini";
  FileSystem clusterFs = dfsCluster.getFileSystem();
  clusterFs.copyFromLocalFile(false, new Path(authProviderDir.toString(), authProviderName), new Path(authProviderName));
  // need to write sentry-site at execution time because we don't know
  // the location of sentry.solr.provider.resource beforehand
  StringBuilder sentrySiteData = new StringBuilder();
  sentrySiteData.append("<configuration>\n");
  addPropertyToSentry(sentrySiteData, "sentry.provider", "org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider");
  addPropertyToSentry(sentrySiteData, "sentry.solr.provider.resource", clusterFs.getWorkingDirectory() + File.separator + authProviderName);
  sentrySiteData.append("</configuration>\n");
  FileUtils.writeStringToFile(sentrySite, sentrySiteData.toString());
  return sentrySite;
}
74. TestHQuorumPeer#setup()
@Before
public void setup() throws IOException {
  // Set it to a non-standard port.
  TEST_UTIL.getConfiguration().setInt(HConstants.ZOOKEEPER_CLIENT_PORT, PORT_NO);
  this.dataDir = TEST_UTIL.getDataTestDir(this.getClass().getName());
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  if (fs.exists(this.dataDir)) {
    if (!fs.delete(this.dataDir, true)) {
      throw new IOException("Failed cleanup of " + this.dataDir);
    }
  }
  if (!fs.mkdirs(this.dataDir)) {
    throw new IOException("Failed create of " + this.dataDir);
  }
}
75. TestHBaseFsck#testNoVersionFile()
/**
 * when the hbase.version file missing, It is fix the fault.
 */
@Test
public void testNoVersionFile() throws Exception {
  // delete the hbase.version file
  Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
  FileSystem fs = rootDir.getFileSystem(conf);
  Path versionFile = new Path(rootDir, HConstants.VERSION_FILE_NAME);
  fs.delete(versionFile, true);
  // test
  HBaseFsck hbck = doFsck(conf, false);
  assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_VERSION_FILE });
  // fix hbase.version missing
  doFsck(conf, true);
  // no version file fixed
  assertNoErrors(doFsck(conf, false));
}
76. OfflineMetaRebuildTestCore#createRegion()
protected HRegionInfo createRegion(Configuration conf, final HTable htbl, byte[] startKey, byte[] endKey) throws IOException {
  HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getTableName(), startKey, endKey);
  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegion.REGIONINFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  hri.write(out);
  out.close();
  // add to meta.
  Put put = new Put(hri.getRegionName());
  put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(hri));
  meta.put(put);
  meta.flushCommits();
  return hri;
}
77. TestHLogSplit#testRecoveredEditsPathForMeta()
/**
 * @throws IOException
 * @see https://issues.apache.org/jira/browse/HBASE-3020
 */
@Test
public void testRecoveredEditsPathForMeta() throws IOException {
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  byte[] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
  Path tdir = new Path(hbaseDir, Bytes.toString(HConstants.META_TABLE_NAME));
  Path regiondir = new Path(tdir, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
  fs.mkdirs(regiondir);
  long now = System.currentTimeMillis();
  HLog.Entry entry = new HLog.Entry(new HLogKey(encoded, HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), new WALEdit());
  Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, hbaseDir, true);
  String parentOfParent = p.getParent().getParent().getName();
  assertEquals(parentOfParent, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
}
78. UtilsForTests#writeFile()
static void writeFile(NameNode namenode, Configuration conf, Path name, short replication) throws IOException {
  FileSystem fileSys = FileSystem.get(conf);
  SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, conf, name, BytesWritable.class, BytesWritable.class, CompressionType.NONE);
  writer.append(new BytesWritable(), new BytesWritable());
  writer.close();
  fileSys.setReplication(name, replication);
  DFSTestUtil.waitReplication(fileSys, name, replication);
}
79. TestMapRed#printFiles()
private static void printFiles(Path dir, Configuration conf) throws IOException {
  FileSystem fs = dir.getFileSystem(conf);
  for (FileStatus f : fs.listStatus(dir)) {
    System.out.println("Reading " + f.getPath() + ": ");
    if (f.isDir()) {
      System.out.println(" it is a map file.");
      printSequenceFile(fs, new Path(f.getPath(), "data"), conf);
    } else if (isSequenceFile(fs, f.getPath())) {
      System.out.println(" it is a sequence file.");
      printSequenceFile(fs, f.getPath(), conf);
    } else {
      System.out.println(" it is a text file.");
      printTextFile(fs, f.getPath());
    }
  }
}
80. TestMultipleOutputs#setUp()
public void setUp() throws Exception {
  super.setUp();
  Path rootDir = getDir(ROOT_DIR);
  Path inDir = getDir(IN_DIR);
  JobConf conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);
  fs.delete(rootDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
}
81. TestMultipleOutputs#setUp()
public void setUp() throws Exception {
  super.setUp();
  Path rootDir = getDir(ROOT_DIR);
  Path inDir = getDir(IN_DIR);
  JobConf conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);
  fs.delete(rootDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
}
82. TaskRunner#writeLocalTaskFile()
/**
 * Write the task specific job-configuration file.
 *
 * @param localFs
 * @throws IOException
 */
private static void writeLocalTaskFile(String jobFile, JobConf conf) throws IOException {
  Path localTaskFile = new Path(jobFile);
  FileSystem localFs = FileSystem.getLocal(conf);
  localFs.delete(localTaskFile, true);
  OutputStream out = localFs.create(localTaskFile);
  try {
    conf.writeXml(out);
  } finally {
    out.close();
  }
}
83. DumpTypedBytes#run()
/**
 * The main driver for <code>DumpTypedBytes</code>.
 */
public int run(String[] args) throws Exception {
  Path pattern = new Path(args[0]);
  FileSystem fs = pattern.getFileSystem(getConf());
  fs.setVerifyChecksum(true);
  for (Path p : FileUtil.stat2Paths(fs.globStatus(pattern), pattern)) {
    List<FileStatus> inputFiles = new ArrayList<FileStatus>();
    FileStatus status = fs.getFileStatus(p);
    if (status.isDir()) {
      FileStatus[] files = fs.listStatus(p);
      Collections.addAll(inputFiles, files);
    } else {
      inputFiles.add(status);
    }
    return dumpTypedBytes(inputFiles);
  }
  return -1;
}
84. TestSplittableBufferedWriter#ensureEmptyWriteDir()
/** Create the directory where we'll write our test files to; and
 * make sure it has no files in it. */
private void ensureEmptyWriteDir() throws IOException {
  FileSystem fs = FileSystem.getLocal(getConf());
  Path writeDir = getWritePath();
  fs.mkdirs(writeDir);
  FileStatus[] stats = fs.listStatus(writeDir);
  for (FileStatus stat : stats) {
    if (stat.isDir()) {
      fail("setUp(): Write directory " + writeDir + " contains subdirectories");
    }
    LOG.debug("setUp(): Removing " + stat.getPath());
    if (!fs.delete(stat.getPath(), false)) {
      fail("setUp(): Could not delete residual file " + stat.getPath());
    }
  }
  if (!fs.exists(writeDir)) {
    fail("setUp: Could not create " + writeDir);
  }
}
85. DirectImportUtils#createHdfsSink()
/**
 * Open a file in HDFS for write to hold the data associated with a table.
 * Creates any necessary directories, and returns the OutputStream to write
 * to. The caller is responsible for calling the close() method on the
 * returned stream.
 */
public static SplittableBufferedWriter createHdfsSink(Configuration conf, ImportOptions options,
    String tableName) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  String warehouseDir = options.getWarehouseDir();
  Path destDir = null;
  if (null != warehouseDir) {
    destDir = new Path(new Path(warehouseDir), tableName);
  } else {
    destDir = new Path(tableName);
  }
  LOG.debug("Writing to filesystem: " + conf.get("fs.default.name"));
  LOG.debug("Creating destination directory " + destDir);
  fs.mkdirs(destDir);
  // This Writer will be closed by the caller.
  return new SplittableBufferedWriter(new SplittingOutputStream(conf, destDir, "data-",
      options.getDirectSplitSize(), options.shouldUseCompression()));
}
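Because the Javadoc puts the close() responsibility on the caller, a call site looks roughly like the sketch below. It assumes SplittableBufferedWriter exposes the usual java.io.Writer methods and that conf and options are already in scope; the table name and records are only placeholders:

SplittableBufferedWriter writer = DirectImportUtils.createHdfsSink(conf, options, "employees");
try {
  writer.write("1,alice\n");   // example records; a real import streams table rows here
  writer.write("2,bob\n");
} finally {
  writer.close();              // the caller, not createHdfsSink(), owns the stream
}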
86. HiveImport#removeTempLogs()
/**
 * If we used a MapReduce-based upload of the data, remove the _logs dir
 * from where we put it, before running Hive LOAD DATA INPATH.
 */
private void removeTempLogs(String tableName) throws IOException {
  FileSystem fs = FileSystem.get(configuration);
  String warehouseDir = options.getWarehouseDir();
  Path tablePath;
  if (warehouseDir != null) {
    tablePath = new Path(new Path(warehouseDir), tableName);
  } else {
    tablePath = new Path(tableName);
  }
  Path logsPath = new Path(tablePath, "_logs");
  if (fs.exists(logsPath)) {
    LOG.info("Removing temporary files from import process: " + logsPath);
    if (!fs.delete(logsPath, true)) {
      LOG.warn("Could not delete temporary files; continuing with import, but it may fail.");
    }
  }
}
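For context, this cleanup prepares the import directory for the Hive load step that follows. The statement it prepares for is, roughly, the one sketched below, reusing tablePath and tableName from the method above; the exact statement HiveImport builds may differ:

// Illustrative only: the kind of statement the _logs cleanup prepares for.
String loadDataStmt = "LOAD DATA INPATH '" + tablePath.toString()
    + "' INTO TABLE " + tableName;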
87. TestFileCorruption#testLocalFileCorruption()
/** check if local FS can handle corrupted blocks properly */
public void testLocalFileCorruption() throws Exception {
  Configuration conf = new Configuration();
  Path file = new Path(System.getProperty("test.build.data"), "corruptFile");
  FileSystem fs = FileSystem.getLocal(conf);
  DataOutputStream dos = fs.create(file);
  dos.writeBytes("original bytes");
  dos.close();
  // Now deliberately corrupt the file
  dos = new DataOutputStream(new FileOutputStream(file.toString()));
  dos.writeBytes("corruption");
  dos.close();
  // Now attempt to read the file
  DataInputStream dis = fs.open(file, 512);
  try {
    System.out.println("A ChecksumException is expected to be logged.");
    dis.readByte();
  } catch (ChecksumException ignore) {
    // Expected: the file contents no longer match the stored CRC.
  }
  fs.delete(file, true);
}
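The ChecksumException comes from the local filesystem's client-side CRC sidecar files. If the corrupted bytes need to be read anyway, verification can be switched off with the same FileSystem.setVerifyChecksum() call that DumpTypedBytes enables above; a sketch reusing conf and file from the test:

FileSystem localFs = FileSystem.getLocal(conf);
localFs.setVerifyChecksum(false);           // skip CRC validation for this FileSystem instance
DataInputStream in = localFs.open(file, 512);
try {
  System.out.println("First byte without verification: " + in.readByte());
} finally {
  in.close();
}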
88. TestScanFilterEvaluatorForIndexInScan#initHRegion()
private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
    String callingMethod, Configuration conf, byte[]... families) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(htd.getName(), startKey, stopKey, false);
  Path path = new Path(DIR + callingMethod);
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  return HRegion.createHRegion(info, path, conf, htd);
}
89. TestScanFilterEvaluator#initHRegion()
/**
 * @param tableName
 * @param startKey
 * @param stopKey
 * @param callingMethod
 * @param conf
 * @param families
 * @throws IOException
 * @return A region on which you must call {@link HRegion#closeHRegion(HRegion)} when done.
 */
private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
    String callingMethod, Configuration conf, byte[]... families) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(htd.getName(), startKey, stopKey, false);
  Path path = new Path(DIR + callingMethod);
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  return HRegion.createHRegion(info, path, conf, htd);
}
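The @return contract above obliges the caller to close the region through the HRegion.closeHRegion(HRegion) helper it references. A minimal caller sketch; the table and family names are placeholders and conf is assumed to be in scope:

HRegion region = initHRegion(Bytes.toBytes("testtable"), HConstants.EMPTY_START_ROW,
    HConstants.EMPTY_END_ROW, "testExample", conf, Bytes.toBytes("cf"));
try {
  // ... run scans or other assertions against the region ...
} finally {
  HRegion.closeHRegion(region);   // honors the contract documented in the @return tag
}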
90. Analyzer#main()
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path outdir = new Path("/result.txt");
  fs.delete(outdir, true);
  Job job = new Job(conf, "Result Analyzer");
  job.setJarByClass(Analyzer.class);
  job.setMapperClass(_Mapper.class);
  job.setReducerClass(_Reducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  FileInputFormat.addInputPath(job, new Path(TestDFSIOEnh.READ_DIR, "part-00000"));
  FileOutputFormat.setOutputPath(job, outdir);
  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
91. Utils#getSharedZipfCore()
private static final ZipfCore getSharedZipfCore(String fname, JobConf job)
    throws IOException, ClassNotFoundException {
  ZipfCore zipfcore = null;
  FileSystem fs = FileSystem.getLocal(job);
  Path symbLink = new Path(fname);
  if (fs.exists(symbLink)) {
    FileInputStream fi = new FileInputStream(symbLink.toString());
    ObjectInputStream si = new ObjectInputStream(fi);
    zipfcore = (ZipfCore) si.readObject();
    si.close();
  }
  return zipfcore;
}
92. Utils#serialZipfCore()
/***
 * Steps to make a ZipfCore available for each job
 * Client side
 *   1. Zipfian creates its corresponding ZipfCore object
 *   2. serialize the ZipfCore into a hdfs file
 *   3. share the hdfs file by putting it into distributed cache file
 * Job side
 *   1. read object from distributed cache file to re-create the ZipfCore
 * @throws IOException
 */
private static final void serialZipfCore(Zipfian zipfian, Path hdfs_zipf) throws IOException {
  Utils.checkHdfsPath(hdfs_zipf);
  FileSystem fs = hdfs_zipf.getFileSystem(new Configuration());
  FSDataOutputStream fout = fs.create(hdfs_zipf);
  ObjectOutputStream so = new ObjectOutputStream(fout);
  ZipfCore core = zipfian.createZipfCore();
  so.writeObject(core);
  so.close();
  fout.close();
  fs.close();
}
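Client-side step 3 (sharing the serialized file through the distributed cache) is not shown in this snippet. A sketch using the classic org.apache.hadoop.filecache.DistributedCache API and java.net.URI; the "#zipfcore" symlink name is illustrative and must match the fname later passed to getSharedZipfCore():

private static void shareZipfCore(Path hdfs_zipf, JobConf job) throws URISyntaxException {
  DistributedCache.createSymlink(job);   // let tasks open the cached file via a local symlink
  DistributedCache.addCacheFile(new URI(hdfs_zipf.toUri() + "#zipfcore"), job);
}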
93. HCatOutputFormat#getRecordWriter()
/**
 * Get the record writer for the job. Uses the Table's default OutputStorageDriver
 * to get the record writer.
 * @param context the information about the current task.
 * @return a RecordWriter to write the output for the job.
 * @throws IOException
 */
@Override
public RecordWriter<WritableComparable<?>, HCatRecord> getRecordWriter(TaskAttemptContext context)
    throws IOException, InterruptedException {
  // First create the RW.
  HCatRecordWriter rw = new HCatRecordWriter(context);
  // Now set permissions and group on freshly created files.
  OutputJobInfo info = getJobInfo(context);
  Path workFile = rw.getStorageDriver().getWorkFilePath(context, info.getLocation());
  Path tblPath = new Path(info.getTable().getSd().getLocation());
  FileSystem fs = tblPath.getFileSystem(context.getConfiguration());
  FileStatus tblPathStat = fs.getFileStatus(tblPath);
  fs.setPermission(workFile, tblPathStat.getPermission());
  try {
    fs.setOwner(workFile, null, tblPathStat.getGroup());
  } catch (AccessControlException ace) {
    // Setting the group may fail without sufficient rights; ignore and keep the default group.
  }
  return rw;
}
94. MasterObserverExample#postCreateTable()
// vv MasterObserverExample
@Override
public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
    HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
  // ^^ MasterObserverExample
  LOG.debug("Got postCreateTable callback");
  // vv MasterObserverExample
  // co MasterObserverExample-1-GetName Get the new table's name from the table descriptor.
  TableName tableName = desc.getTableName();
  // ^^ MasterObserverExample
  LOG.debug("Created table: " + tableName + ", region count: " + regions.length);
  // vv MasterObserverExample
  MasterServices services = ctx.getEnvironment().getMasterServices();
  // co MasterObserverExample-2-Services Get the available services and retrieve a reference to the actual file system.
  MasterFileSystem masterFileSystem = services.getMasterFileSystem();
  FileSystem fileSystem = masterFileSystem.getFileSystem();
  // co MasterObserverExample-3-Path Create a new directory that will store binary data from the client application.
  Path blobPath = new Path(tableName.getQualifierAsString() + "-blobs");
  fileSystem.mkdirs(blobPath);
  // ^^ MasterObserverExample
  LOG.debug("Created " + blobPath + ": " + fileSystem.exists(blobPath));
  // vv MasterObserverExample
}
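The callback only fires if the observer is registered with the master. A minimal sketch of programmatic registration through the standard master-coprocessor configuration key; the same entry can instead be placed in hbase-site.xml:

Configuration conf = HBaseConfiguration.create();
conf.set("hbase.coprocessor.master.classes",
    MasterObserverExample.class.getName());   // register the observer before the master starts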
95. TestMapReduce#printFiles()
private static void printFiles(Path dir, Configuration conf) throws IOException {
  FileSystem fs = dir.getFileSystem(conf);
  for (FileStatus f : fs.listStatus(dir)) {
    System.out.println("Reading " + f.getPath() + ": ");
    if (f.isDir()) {
      System.out.println(" it is a map file.");
      printSequenceFile(fs, new Path(f.getPath(), "data"), conf);
    } else if (isSequenceFile(fs, f.getPath())) {
      System.out.println(" it is a sequence file.");
      printSequenceFile(fs, f.getPath(), conf);
    } else {
      System.out.println(" it is a text file.");
      printTextFile(fs, f.getPath());
    }
  }
}
96. TestMultipleInputs#setUp()
@Before
public void setUp() throws Exception {
  super.setUp();
  Path rootDir = getDir(ROOT_DIR);
  Path in1Dir = getDir(IN1_DIR);
  Path in2Dir = getDir(IN2_DIR);
  Configuration conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);
  fs.delete(rootDir, true);
  if (!fs.mkdirs(in1Dir)) {
    throw new IOException("Mkdirs failed to create " + in1Dir.toString());
  }
  if (!fs.mkdirs(in2Dir)) {
    throw new IOException("Mkdirs failed to create " + in2Dir.toString());
  }
}
99. TestKillSubProcesses#runTests()
void runTests(JobConf conf, JobTracker jt) throws IOException {
  FileSystem fs = FileSystem.getLocal(mr.createJobConf());
  Path rootDir = new Path(TEST_ROOT_DIR);
  if (!fs.exists(rootDir)) {
    fs.mkdirs(rootDir);
  }
  fs.setPermission(rootDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  runKillingJobAndValidate(jt, conf);
  runFailingJobAndValidate(jt, conf);
  runSuccessfulJobAndValidate(jt, conf);
}