org.apache.hadoop.fs.FileSystem

Here are examples of the Java API class org.apache.hadoop.fs.FileSystem, taken from open source projects.
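
Before the project examples, a minimal usage sketch may help orient the reader. It is not taken from any of the projects below; it simply strings together the calls that recur throughout these examples: resolving a FileSystem from a Path, exists(), create(), open(), and delete(). The path /tmp/filesystem-example.txt is an arbitrary placeholder.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/filesystem-example.txt");
        // Resolve the FileSystem that owns this path (local FS or HDFS, depending on the configuration).
        FileSystem fs = path.getFileSystem(conf);
        if (fs.exists(path)) {
            // Second argument requests recursive deletion; irrelevant for a plain file.
            fs.delete(path, true);
        }
        // Write a small payload.
        FSDataOutputStream out = fs.create(path);
        out.writeUTF("Hello, FileSystem");
        out.close();
        // Read it back.
        FSDataInputStream in = fs.open(path);
        System.out.println(in.readUTF());
        in.close();
        // Clean up.
        fs.delete(path, false);
    }
}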

1. HadoopUtilsTest#testRenameRecursively()

Project: gobblin
Source File: HadoopUtilsTest.java
View license
@Test
public void testRenameRecursively() throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    fs.mkdirs(new Path(hadoopUtilsTestDir, "testRename/a/b/c"));
    fs.mkdirs(new Path(hadoopUtilsTestDir, "testRenameStaging/a/b/c"));
    fs.mkdirs(new Path(hadoopUtilsTestDir, "testRenameStaging/a/b/c/e"));
    fs.create(new Path(hadoopUtilsTestDir, "testRenameStaging/a/b/c/t1.txt"));
    fs.create(new Path(hadoopUtilsTestDir, "testRenameStaging/a/b/c/e/t2.txt"));
    HadoopUtils.renameRecursively(fs, new Path(hadoopUtilsTestDir, "testRenameStaging"), new Path(hadoopUtilsTestDir, "testRename"));
    Assert.assertTrue(fs.exists(new Path(hadoopUtilsTestDir, "testRename/a/b/c/t1.txt")));
    Assert.assertTrue(fs.exists(new Path(hadoopUtilsTestDir, "testRename/a/b/c/e/t2.txt")));
}

2. LinkDb#install()

Project: anthelion
Source File: LinkDb.java
View license
public static void install(JobConf job, Path linkDb) throws IOException {
    Path newLinkDb = FileOutputFormat.getOutputPath(job);
    FileSystem fs = new JobClient(job).getFs();
    Path old = new Path(linkDb, "old");
    Path current = new Path(linkDb, CURRENT_NAME);
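    // Rotate any existing "current" link db to "old" before installing the newly generated one.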
    if (fs.exists(current)) {
        if (fs.exists(old))
            fs.delete(old, true);
        fs.rename(current, old);
    }
    fs.mkdirs(linkDb);
    fs.rename(newLinkDb, current);
    if (fs.exists(old))
        fs.delete(old, true);
    LockUtil.removeLockFile(fs, new Path(linkDb, LOCK_NAME));
}

3. NutchData#generate()

Project: HiBench
Source File: NutchData.java
View license
/*	
	private void test2LevelMapFile(Path furl) throws IOException {

		JobConf job = new JobConf();
		FileSystem fs = FileSystem.get(job);
		MapFile.Reader reader = new MapFile.Reader(fs, furl.toString(), job);
		Text value = new Text();
		reader.get(new LongWritable(1000), value);
		if (null != value) {
			log.info("---Find it: <1000, " + value + ">");
		}
	}
*/
public void generate() throws Exception {
    init();
    createNutchUrls();
    createNutchIndexData();
    Path ffetch = new Path(options.getResultPath(), CrawlDatum.FETCH_DIR_NAME);
    Path fparse = new Path(options.getResultPath(), CrawlDatum.PARSE_DIR_NAME);
    Path linkdb = new Path(segment, LINKDB_DIR_NAME);
    FileSystem fs = ffetch.getFileSystem(new Configuration());
    fs.rename(ffetch, new Path(segment, CrawlDatum.FETCH_DIR_NAME));
    fs.rename(fparse, new Path(segment, CrawlDatum.PARSE_DIR_NAME));
    fs.rename(linkdb, new Path(options.getResultPath(), LINKDB_DIR_NAME));
    fs.close();
    close();
}

4. PailTap#commitResource()

Project: dfs-datastores
Source File: PailTap.java
View license
@Override
public boolean commitResource(JobConf conf) throws IOException {
    Pail p = Pail.create(_pailRoot, ((PailScheme) getScheme()).getSpec(), false);
    FileSystem fs = p.getFileSystem();
    Path tmpPath = new Path(_pailRoot, "_temporary");
    if (fs.exists(tmpPath)) {
        LOG.info("Deleting _temporary directory left by Hadoop job: " + tmpPath.toString());
        fs.delete(tmpPath, true);
    }
    Path tmp2Path = new Path(_pailRoot, "_temporary2");
    if (fs.exists(tmp2Path)) {
        LOG.info("Deleting _temporary2 directory: " + tmp2Path.toString());
        fs.delete(tmp2Path, true);
    }
    Path logPath = new Path(_pailRoot, "_logs");
    if (fs.exists(logPath)) {
        LOG.info("Deleting _logs directory left by Hadoop job: " + logPath.toString());
        fs.delete(logPath, true);
    }
    return true;
}

5. TestMiniMRChildTask#launchTest()

Project: hadoop-20
Source File: TestMiniMRChildTask.java
View license
/**
   * Launch tests 
   * @param conf Configuration of the mapreduce job.
   * @param inDir input path
   * @param outDir output path
   * @param input Input text
   * @throws IOException
   */
public void launchTest(JobConf conf, Path inDir, Path outDir, String input) throws IOException {
    configure(conf, inDir, outDir, input, MapClass.class, IdentityReducer.class);
    FileSystem outFs = outDir.getFileSystem(conf);
    // Launch job with default option for temp dir. 
    // i.e. temp dir is ./tmp 
    JobClient.runJob(conf);
    outFs.delete(outDir, true);
    // Launch job by giving relative path to temp dir.
    conf.set("mapred.child.tmp", "../temp");
    JobClient.runJob(conf);
    outFs.delete(outDir, true);
    // Launch job by giving absolute path to temp dir
    conf.set("mapred.child.tmp", "/tmp");
    JobClient.runJob(conf);
    outFs.delete(outDir, true);
}

6. MapReduceTestUtil#createJob()

Project: hadoop-20
Source File: MapReduceTestUtil.java
View license
public static Job createJob(Configuration conf, Path inDir, Path outDir, int numInputFiles, int numReds, String input) throws IOException {
    Job job = new Job(conf);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(outDir)) {
        fs.delete(outDir, true);
    }
    if (fs.exists(inDir)) {
        fs.delete(inDir, true);
    }
    fs.mkdirs(inDir);
    for (int i = 0; i < numInputFiles; ++i) {
        DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
        file.writeBytes(input);
        file.close();
    }
    FileInputFormat.setInputPaths(job, inDir);
    FileOutputFormat.setOutputPath(job, outDir);
    job.setNumReduceTasks(numReds);
    return job;
}

7. MapReduceTestUtil#createJob()

View license
public static Job createJob(Configuration conf, Path inDir, Path outDir, int numInputFiles, int numReds, String input) throws IOException {
    Job job = new Job(conf);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(outDir)) {
        fs.delete(outDir, true);
    }
    if (fs.exists(inDir)) {
        fs.delete(inDir, true);
    }
    fs.mkdirs(inDir);
    for (int i = 0; i < numInputFiles; ++i) {
        DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
        file.writeBytes(input);
        file.close();
    }
    FileInputFormat.setInputPaths(job, inDir);
    FileOutputFormat.setOutputPath(job, outDir);
    job.setNumReduceTasks(numReds);
    return job;
}

8. CrawlDbMerger#merge()

Project: anthelion
Source File: CrawlDbMerger.java
View license
public void merge(Path output, Path[] dbs, boolean normalize, boolean filter) throws Exception {
    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    long start = System.currentTimeMillis();
    LOG.info("CrawlDb merge: starting at " + sdf.format(start));
    JobConf job = createMergeJob(getConf(), output, normalize, filter);
    for (int i = 0; i < dbs.length; i++) {
        if (LOG.isInfoEnabled()) {
            LOG.info("Adding " + dbs[i]);
        }
        FileInputFormat.addInputPath(job, new Path(dbs[i], CrawlDb.CURRENT_NAME));
    }
    JobClient.runJob(job);
    FileSystem fs = FileSystem.get(getConf());
    fs.mkdirs(output);
    fs.rename(FileOutputFormat.getOutputPath(job), new Path(output, CrawlDb.CURRENT_NAME));
    long end = System.currentTimeMillis();
    LOG.info("CrawlDb merge: finished at " + sdf.format(end) + ", elapsed: " + TimingUtil.elapsedTime(start, end));
}

9. LinkDbMerger#merge()

Project: anthelion
Source File: LinkDbMerger.java
View license
public void merge(Path output, Path[] dbs, boolean normalize, boolean filter) throws Exception {
    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    long start = System.currentTimeMillis();
    LOG.info("LinkDb merge: starting at " + sdf.format(start));
    JobConf job = createMergeJob(getConf(), output, normalize, filter);
    for (int i = 0; i < dbs.length; i++) {
        FileInputFormat.addInputPath(job, new Path(dbs[i], LinkDb.CURRENT_NAME));
    }
    JobClient.runJob(job);
    FileSystem fs = FileSystem.get(getConf());
    fs.mkdirs(output);
    fs.rename(FileOutputFormat.getOutputPath(job), new Path(output, LinkDb.CURRENT_NAME));
    long end = System.currentTimeMillis();
    LOG.info("LinkDb merge: finished at " + sdf.format(end) + ", elapsed: " + TimingUtil.elapsedTime(start, end));
}

10. HdfsAppendTest#tearDown()

Project: camel
Source File: HdfsAppendTest.java
View license
@Override
public void tearDown() throws Exception {
    super.tearDown();
    Thread.sleep(250);
    Configuration conf = new Configuration();
    Path dir = new Path("hdfs://localhost:9000/tmp/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
    dir = new Path("hdfs://localhost:9000/tmp/test-dynamic");
    fs.delete(dir, true);
}

11. HdfsProducerConsumerIntegrationTest#tearDown()

View license
@Override
@After
public void tearDown() throws Exception {
    super.tearDown();
    Thread.sleep(250);
    Configuration conf = new Configuration();
    Path dir = new Path("hdfs://localhost:9000/tmp/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
    fs.delete(new Path("hdfs://localhost:9000/tmp/test/multiple-consumers"), true);
}

12. TestDFSWrite#testDirectWrite()

Project: flume
Source File: TestDFSWrite.java
View license
@Test
public void testDirectWrite() throws IOException {
    FlumeConfiguration conf = FlumeConfiguration.get();
    Path path = new Path("file:///tmp/testfile");
    FileSystem hdfs = path.getFileSystem(conf);
    hdfs.deleteOnExit(path);
    String STRING = "Hello World";
    // writing
    FSDataOutputStream dos = hdfs.create(path);
    dos.writeUTF(STRING);
    dos.close();
    // reading
    FSDataInputStream dis = hdfs.open(path);
    String s = dis.readUTF();
    System.out.println(s);
    assertEquals(STRING, s);
    dis.close();
    hdfs.close();
}

13. NNBench#cleanupBeforeTestrun()

Project: hadoop-20
Source File: NNBench.java
View license
// private static Configuration config = new Configuration();
/**
   * Clean up the files before a test run
   * 
   * @throws IOException on error
   */
private static void cleanupBeforeTestrun(Configuration config) throws IOException {
    FileSystem tempFS = FileSystem.get(config);
    // Delete the data directory only if it is the create/write operation
    if (operation.equals(OP_CREATE_WRITE)) {
        LOG.info("Deleting data directory");
        tempFS.delete(new Path(baseDir, DATA_DIR_NAME), true);
    }
    tempFS.delete(new Path(baseDir, CONTROL_DIR_NAME), true);
    tempFS.delete(new Path(baseDir, OUTPUT_DIR_NAME), true);
}

14. TestJobStatusPersistency#testLocalPersistency()

View license
/**
   * Test if the completed job status is persisted to localfs.
   */
public void testLocalPersistency() throws Exception {
    FileSystem fs = FileSystem.getLocal(createJobConf());
    fs.delete(TEST_DIR, true);
    Properties config = new Properties();
    config.setProperty("mapred.job.tracker.persist.jobstatus.active", "true");
    config.setProperty("mapred.job.tracker.persist.jobstatus.hours", "1");
    config.setProperty("mapred.job.tracker.persist.jobstatus.dir", fs.makeQualified(TEST_DIR).toString());
    stopCluster();
    startCluster(false, config);
    JobID jobId = runJob();
    JobClient jc = new JobClient(createJobConf());
    RunningJob rj = jc.getJob(jobId);
    assertNotNull(rj);
    // check if the local fs has the data
    Path jobInfo = new Path(TEST_DIR, rj.getID() + ".info");
    assertTrue("Missing job info from the local fs", fs.exists(jobInfo));
    fs.delete(TEST_DIR, true);
}

15. TestJobHistory#cleanupLocalFiles()

View license
private void cleanupLocalFiles(MiniMRCluster mr) throws IOException {
    Configuration conf = mr.createJobConf();
    JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
    Path sysDir = new Path(jt.getSystemDir());
    FileSystem fs = sysDir.getFileSystem(conf);
    fs.delete(sysDir, true);
    Path jobHistoryDir = mr.getJobTrackerRunner().getJobTracker().getJobHistory().getJobHistoryLocation();
    fs = jobHistoryDir.getFileSystem(conf);
    fs.delete(jobHistoryDir, true);
}

16. TestJobStatusPersistency#testLocalPersistency()

View license
/**
   * Test if the completed job status is persisted to localfs.
   */
public void testLocalPersistency() throws Exception {
    FileSystem fs = FileSystem.getLocal(createJobConf());
    fs.delete(TEST_DIR, true);
    Properties config = new Properties();
    config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS, "true");
    config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, "1");
    config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS_DIR, fs.makeQualified(TEST_DIR).toString());
    stopCluster();
    startCluster(false, config);
    JobID jobId = runJob();
    JobClient jc = new JobClient(createJobConf());
    RunningJob rj = jc.getJob(jobId);
    assertNotNull(rj);
    // check if the local fs has the data
    Path jobInfo = new Path(TEST_DIR, rj.getID() + ".info");
    assertTrue("Missing job info from the local fs", fs.exists(jobInfo));
    fs.delete(TEST_DIR, true);
}

17. TestSeveral#makeInput()

Project: hadoop-mapreduce
Source File: TestSeveral.java
View license
/** 
   * Utility class to create input for the jobs
   * @param inDir
   * @param conf
   * @throws IOException
   */
private void makeInput(Path inDir, JobConf conf) throws IOException {
    FileSystem inFs = inDir.getFileSystem(conf);
    if (inFs.exists(inDir)) {
        inFs.delete(inDir, true);
    }
    inFs.mkdirs(inDir);
    Path inFile = new Path(inDir, "part-0");
    DataOutputStream file = inFs.create(inFile);
    for (int i = 0; i < numReduces; i++) {
        file.writeBytes("b a\n");
    }
    file.close();
}

18. Utils#checkHdfsPath()

Project: HiBench
Source File: Utils.java
View license
public static final void checkHdfsPath(Path path, boolean mkdir) throws IOException {
    FileSystem fs = path.getFileSystem(new Configuration());
    if (fs.exists(path)) {
        fs.delete(path, true);
    }
    if (mkdir) {
        fs.mkdirs(path);
    }
    fs.close();
}

19. TestHFileCleaner#testTTLCleaner()

Project: hindex
Source File: TestHFileCleaner.java
View license
@Test
public void testTTLCleaner() throws IOException, InterruptedException {
    FileSystem fs = UTIL.getDFSCluster().getFileSystem();
    Path root = UTIL.getDataTestDir();
    Path file = new Path(root, "file");
    fs.createNewFile(file);
    long createTime = System.currentTimeMillis();
    assertTrue("Test file not created!", fs.exists(file));
    TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
    // update the time info for the file, so the cleaner removes it
    fs.setTimes(file, createTime - 100, -1);
    Configuration conf = UTIL.getConfiguration();
    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
    cleaner.setConf(conf);
    assertTrue("File not set deletable - check mod time:" + getFileStats(file, fs) + " with create time:" + createTime, cleaner.isFileDeletable(file));
}

20. SortBasedColPartitionStoreExec#getAppender()

View license
private Appender getAppender(String partition) throws IOException {
    Path dataFile = getDataFile(partition);
    FileSystem fs = dataFile.getFileSystem(context.getConf());
    if (fs.exists(dataFile.getParent())) {
        LOG.info("Path " + dataFile.getParent() + " already exists!");
    } else {
        fs.mkdirs(dataFile.getParent());
        LOG.info("Add subpartition path directory :" + dataFile.getParent());
    }
    if (fs.exists(dataFile)) {
        LOG.info("File " + dataFile + " already exists!");
        FileStatus status = fs.getFileStatus(dataFile);
        LOG.info("File size: " + status.getLen());
    }
    appender = StorageManagerFactory.getStorageManager(context.getConf()).getAppender(meta, outSchema, dataFile);
    appender.enableStats();
    appender.init();
    return appender;
}

21. TestSpecificInputOutputFormat#createParquetFile()

View license
@Before
public void createParquetFile() throws Exception {
    final FileSystem fileSystem = parquetPath.getFileSystem(conf);
    fileSystem.delete(parquetPath, true);
    fileSystem.delete(outputPath, true);
    {
        final Job job = new Job(conf, "write");
        // input not really used
        TextInputFormat.addInputPath(job, inputPath);
        job.setInputFormatClass(TextInputFormat.class);
        job.setMapperClass(TestSpecificInputOutputFormat.MyMapper.class);
        job.setNumReduceTasks(0);
        job.setOutputFormatClass(AvroParquetOutputFormat.class);
        AvroParquetOutputFormat.setOutputPath(job, parquetPath);
        AvroParquetOutputFormat.setSchema(job, Car.SCHEMA$);
        waitForJob(job);
    }
}

22. DeprecatedOutputFormatTest#runMapReduceJob()

View license
private void runMapReduceJob(CompressionCodecName codec) throws IOException, ClassNotFoundException, InterruptedException {
    final FileSystem fileSystem = parquetPath.getFileSystem(conf);
    fileSystem.delete(parquetPath, true);
    fileSystem.delete(outputPath, true);
    {
        jobConf.setInputFormat(TextInputFormat.class);
        TextInputFormat.addInputPath(jobConf, inputPath);
        jobConf.setNumReduceTasks(0);
        jobConf.setOutputFormat(DeprecatedParquetOutputFormat.class);
        DeprecatedParquetOutputFormat.setCompression(jobConf, codec);
        DeprecatedParquetOutputFormat.setOutputPath(jobConf, parquetPath);
        DeprecatedParquetOutputFormat.setWriteSupportClass(jobConf, GroupWriteSupport.class);
        GroupWriteSupport.setSchema(MessageTypeParser.parseMessageType(writeSchema), jobConf);
        jobConf.setMapperClass(DeprecatedMapper.class);
        mapRedJob = JobClient.runJob(jobConf);
    }
}

23. TestMultiStorage#setUp()

Project: pig
Source File: TestMultiStorage.java
View license
@Override
@Before
public void setUp() throws Exception {
    createFile();
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path localOut = new Path("local-out");
    Path dummy = new Path("dummy");
    if (fs.exists(localOut)) {
        fs.delete(localOut, true);
    }
    if (fs.exists(dummy)) {
        fs.delete(dummy, true);
    }
}

24. Util#createInputFile()

Project: pig
Source File: Util.java
View license
public static OutputStream createInputFile(MiniGenericCluster cluster, String fileName) throws IOException {
    FileSystem fs = cluster.getFileSystem();
    if (Util.WINDOWS) {
        fileName = fileName.replace('\\', '/');
    }
    if (fs.exists(new Path(fileName))) {
        throw new IOException("File " + fileName + " already exists on the minicluster");
    }
    return fs.create(new Path(fileName));
}

25. TestFSBase#createPath()

Project: sentry
Source File: TestFSBase.java
View license
protected void createPath(Path relativePath) throws Exception {
    Path fullPath = getFullPathWithSchemeAndAuthority(relativePath);
    FileSystem adminFS = storageFileSystem;
    LOGGER.info("Creating path " + fullPath);
    if (storageDFSType.equals(DFSType.ClusterDFS)) {
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(dfsAdmin, KEYTAB_LOCATION + "/" + dfsAdmin + ".keytab");
        adminFS = getFS(ugi);
    }
    if (adminFS.exists(fullPath)) {
        adminFS.delete(fullPath, true);
    }
    adminFS.mkdirs(fullPath);
}

26. TestLargeObjectLoader#setUp()

Project: sqoop
Source File: TestLargeObjectLoader.java
View license
public void setUp() throws IOException, InterruptedException {
    conf = new Configuration();
    if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
        conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
    }
    String tmpDir = System.getProperty("test.build.data", "/tmp/");
    this.outDir = new Path(System.getProperty("java.io.tmpdir"));
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(outDir)) {
        fs.delete(outDir, true);
    }
    fs.mkdirs(outDir);
    loader = new LargeObjectLoader(conf, outDir);
}

27. TezClientUtils#getLRFileStatus()

Project: tez
Source File: TezClientUtils.java
View license
private static FileStatus[] getLRFileStatus(String fileName, Configuration conf) throws IOException {
    URI uri;
    try {
        uri = new URI(fileName);
    } catch (URISyntaxException e) {
        String message = "Invalid URI defined in configuration for" + " location of TEZ jars. providedURI=" + fileName;
        LOG.error(message);
        throw new TezUncheckedException(message, e);
    }
    Path p = new Path(uri);
    FileSystem fs = p.getFileSystem(conf);
    p = fs.resolvePath(p.makeQualified(fs.getUri(), fs.getWorkingDirectory()));
    FileSystem targetFS = p.getFileSystem(conf);
    if (targetFS.isDirectory(p)) {
        return targetFS.listStatus(p);
    } else {
        FileStatus fStatus = targetFS.getFileStatus(p);
        return new FileStatus[] { fStatus };
    }
}

28. StorageHelper#logCommit()

Project: aegisthus
Source File: StorageHelper.java
View license
public void logCommit(String file) throws IOException {
    Path log = commitPath(getTaskId());
    if (debug) {
        LOG.info(String.format("logging (%s) to commit log (%s)", file, log.toUri().toString()));
    }
    FileSystem fs = log.getFileSystem(config);
    DataOutputStream os = null;
    if (fs.exists(log)) {
        os = fs.append(log);
    } else {
        os = fs.create(log);
    }
    os.writeBytes(file);
    os.write('\n');
    os.close();
}

29. Utils#copy()

Project: aegisthus
Source File: Utils.java
View license
public static void copy(Path from, Path to, boolean snappy, Configuration conf) throws IOException {
    FileSystem fromFs = from.getFileSystem(conf);
    FileSystem toFs = to.getFileSystem(conf);
    InputStream in = fromFs.open(from);
    OutputStream out = toFs.create(to, false);
    try {
        if (snappy) {
            in = new SnappyInputStream2(in);
        }
        byte[] buffer = new byte[65536];
        int bytesRead;
        while ((bytesRead = in.read(buffer)) >= 0) {
            if (bytesRead > 0) {
                out.write(buffer, 0, bytesRead);
            }
        }
    } finally {
        in.close();
        out.close();
    }
}

30. Distcp#checkOutputDirectory()

Project: aegisthus
Source File: Distcp.java
View license
/**
	 * checks to see if the output directory exists and throws an error if it
	 * does.
	 * 
	 * TODO: extend this to allow overwrite if set.
	 * 
	 * @throws IOException
	 */
protected void checkOutputDirectory(Job job, String outputDir, boolean overwrite) throws IOException {
    Path out = new Path(outputDir);
    FileSystem fsOut = out.getFileSystem(job.getConfiguration());
    if (fsOut.exists(out)) {
        if (overwrite) {
            fsOut.delete(out, true);
        } else {
            String error = String.format("Ouput directory (%s) exists, failing", outputDir);
            LOG.error(error);
            throw new IOException(error);
        }
    }
}

31. HadoopFileCacheRepository#updateCache()

View license
private void updateCache(Path file, long checksum, Path cachePath, Path cacheChecksumPath) throws IOException {
    if (LOG.isInfoEnabled()) {
        LOG.info(MessageFormat.format("updating library cache: {0} -> {1}", file, cachePath));
    }
    FileSystem sourceFs = file.getFileSystem(configuration);
    FileSystem cacheFs = cachePath.getFileSystem(configuration);
    // remove checksum file -> cachePath
    delete(cacheFs, cacheChecksumPath);
    delete(cacheFs, cachePath);
    // sync source file to cache file
    try (FSDataOutputStream checksumOutput = cacheFs.create(cacheChecksumPath, false)) {
        checksumOutput.writeLong(checksum);
        syncFile(sourceFs, file, cacheFs, cachePath);
    }
}

32. FromFileToHdfsTest#tearDown()

Project: camel
Source File: FromFileToHdfsTest.java
View license
@Override
public void tearDown() throws Exception {
    if (!canTest()) {
        return;
    }
    super.tearDown();
    Configuration conf = new Configuration();
    Path dir = new Path("target/outbox");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
}

33. HdfsConsumerTest#tearDown()

Project: camel
Source File: HdfsConsumerTest.java
View license
@Override
public void tearDown() throws Exception {
    if (!canTest()) {
        return;
    }
    super.tearDown();
    Thread.sleep(100);
    Configuration conf = new Configuration();
    Path dir = new Path("target/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
}

34. HdfsProducerConsumerTest#tearDown()

Project: camel
Source File: HdfsProducerConsumerTest.java
View license
@Override
public void tearDown() throws Exception {
    if (!canTest()) {
        return;
    }
    super.tearDown();
    Thread.sleep(100);
    Configuration conf = new Configuration();
    Path dir = new Path("target/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
}

35. HdfsProducerSplitTest#tearDown()

Project: camel
Source File: HdfsProducerSplitTest.java
View license
@Override
public void tearDown() throws Exception {
    if (!canTest()) {
        return;
    }
    super.tearDown();
    Thread.sleep(100);
    Configuration conf = new Configuration();
    Path dir = new Path("target/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
}

36. HdfsProducerTest#tearDown()

Project: camel
Source File: HdfsProducerTest.java
View license
@Override
public void tearDown() throws Exception {
    if (!canTest()) {
        return;
    }
    super.tearDown();
    Thread.sleep(250);
    Configuration conf = new Configuration();
    Path dir = new Path("target/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
}

37. FromFileToHdfsTest#tearDown()

Project: camel
Source File: FromFileToHdfsTest.java
View license
@Override
public void tearDown() throws Exception {
    if (!canTest()) {
        return;
    }
    super.tearDown();
    Configuration conf = new Configuration();
    Path dir = new Path("target/outbox");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
}

38. HdfsConsumerTest#tearDown()

Project: camel
Source File: HdfsConsumerTest.java
View license
@Override
public void tearDown() throws Exception {
    if (!canTest()) {
        return;
    }
    super.tearDown();
    Thread.sleep(100);
    Configuration conf = new Configuration();
    Path dir = new Path("target/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
}

39. HdfsProducerConsumerTest#tearDown()

Project: camel
Source File: HdfsProducerConsumerTest.java
View license
@Override
public void tearDown() throws Exception {
    if (!canTest()) {
        return;
    }
    super.tearDown();
    Thread.sleep(100);
    Configuration conf = new Configuration();
    Path dir = new Path("target/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
}

40. HdfsProducerSplitTest#tearDown()

Project: camel
Source File: HdfsProducerSplitTest.java
View license
@Override
public void tearDown() throws Exception {
    if (!canTest()) {
        return;
    }
    super.tearDown();
    Thread.sleep(100);
    Configuration conf = new Configuration();
    Path dir = new Path("target/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
}

41. HdfsProducerTest#tearDown()

Project: camel
Source File: HdfsProducerTest.java
View license
@Override
public void tearDown() throws Exception {
    if (!canTest()) {
        return;
    }
    super.tearDown();
    Thread.sleep(250);
    Configuration conf = new Configuration();
    Path dir = new Path("target/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
}

42. HdfsAppendTest#setUp()

Project: camel
Source File: HdfsAppendTest.java
View license
@Override
public void setUp() throws Exception {
    super.setUp();
    Configuration conf = new Configuration();
    conf.addResource("hdfs-test.xml");
    Path file = new Path("hdfs://localhost:9000/tmp/test/test-camel-simple-write-file1");
    FileSystem fs = FileSystem.get(file.toUri(), conf);
    if (fs.exists(file)) {
        fs.delete(file, true);
    }
    FSDataOutputStream out = fs.create(file);
    for (int i = 0; i < 10; ++i) {
        out.write("PIPPO".getBytes("UTF-8"));
    }
    out.close();
}

43. HBase96Test#createHRegion()

Project: cdap
Source File: HBase96Test.java
View license
@Override
public HRegion createHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, byte[]... families) throws IOException {
    if (conf == null) {
        conf = new Configuration();
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
        htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    Path path = new Path(conf.get(HConstants.HBASE_DIR), callingMethod);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(path)) {
        if (!fs.delete(path, true)) {
            throw new IOException("Failed delete of " + path);
        }
    }
    return HRegion.createHRegion(info, path, conf, htd);
}

44. HBase98Test#createHRegion()

Project: cdap
Source File: HBase98Test.java
View license
@Override
public HRegion createHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, byte[]... families) throws IOException {
    if (conf == null) {
        conf = new Configuration();
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
        htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    Path path = new Path(conf.get(HConstants.HBASE_DIR), callingMethod);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(path)) {
        if (!fs.delete(path, true)) {
            throw new IOException("Failed delete of " + path);
        }
    }
    return HRegion.createHRegion(info, path, conf, htd);
}

45. HBase10Test#createHRegion()

Project: cdap
Source File: HBase10Test.java
View license
@Override
public HRegion createHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, byte[]... families) throws IOException {
    if (conf == null) {
        conf = new Configuration();
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
        htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    Path path = new Path(conf.get(HConstants.HBASE_DIR), callingMethod);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(path)) {
        if (!fs.delete(path, true)) {
            throw new IOException("Failed delete of " + path);
        }
    }
    return HRegion.createHRegion(info, path, conf, htd);
}

46. HBase10CDHTest#createHRegion()

Project: cdap
Source File: HBase10CDHTest.java
View license
@Override
public HRegion createHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, byte[]... families) throws IOException {
    if (conf == null) {
        conf = new Configuration();
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
        htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    Path path = new Path(conf.get(HConstants.HBASE_DIR), callingMethod);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(path)) {
        if (!fs.delete(path, true)) {
            throw new IOException("Failed delete of " + path);
        }
    }
    return HRegion.createHRegion(info, path, conf, htd);
}

47. HBase10CDH550Test#createHRegion()

Project: cdap
Source File: HBase10CDH550Test.java
View license
@Override
public HRegion createHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, byte[]... families) throws IOException {
    if (conf == null) {
        conf = new Configuration();
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
        htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    Path path = new Path(conf.get(HConstants.HBASE_DIR), callingMethod);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(path)) {
        if (!fs.delete(path, true)) {
            throw new IOException("Failed delete of " + path);
        }
    }
    return HRegion.createHRegion(info, path, conf, htd);
}

48. HBase11Test#createHRegion()

Project: cdap
Source File: HBase11Test.java
View license
@Override
public HRegion createHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, byte[]... families) throws IOException {
    if (conf == null) {
        conf = new Configuration();
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
        htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    Path path = new Path(conf.get(HConstants.HBASE_DIR), callingMethod);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(path)) {
        if (!fs.delete(path, true)) {
            throw new IOException("Failed delete of " + path);
        }
    }
    return HRegion.createHRegion(info, path, conf, htd);
}

49. HBase12CDH570Test#createHRegion()

Project: cdap
Source File: HBase12CDH570Test.java
View license
@Override
public HRegion createHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, byte[]... families) throws IOException {
    if (conf == null) {
        conf = new Configuration();
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
        htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    Path path = new Path(conf.get(HConstants.HBASE_DIR), callingMethod);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(path)) {
        if (!fs.delete(path, true)) {
            throw new IOException("Failed delete of " + path);
        }
    }
    return HRegion.createHRegion(info, path, conf, htd);
}

50. AvroUtils#createFileIfNotExists()

Project: Cubert
Source File: AvroUtils.java
View license
public static void createFileIfNotExists(BlockSchema fileSchema, String path) throws IOException {
    Configuration conf = new JobConf();
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(new Path(path)))
        return;
    Schema avroSchema = convertFromBlockSchema("CUBERT_MV_RECORD", fileSchema);
    System.out.println("Creating avro file with schema = " + avroSchema);
    GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(avroSchema);
    DataFileWriter<GenericRecord> writer = new DataFileWriter<GenericRecord>(datumWriter);
    FSDataOutputStream fout = FileSystem.create(fs, new Path(path), new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE));
    writer.create(avroSchema, fout);
    writer.flush();
    writer.close();
}

51. BaseHdfsTargetIT#testDirValidity()

Project: datacollector
Source File: BaseHdfsTargetIT.java
View license
@Test
public void testDirValidity() throws Exception {
    //valid dirs
    testDir("/foo", "/foo", true);
    testDir("/foo/${YY()}", "/foo/bar-${YY()}", true);
    //non absolute dir
    testDir("foo", "/foo", false);
    testDir("/foo", "foo", false);
    FileSystem fs = miniDFS.getFileSystem();
    fs.mkdirs(new Path("/bar"));
    //no permissions
    testDir("/bar/foo", "/foo", false);
    testDir("/foo", "/bar/foo", false);
    testDir("/bar/foo/${YY()}", "/foo/${YY()}", false);
    testDir("/foo/${YY()}", "/bar/foo/${YY()}", false);
}

52. HdfsAvroSchemaSerIT#verifySerializationLocation()

View license
private void verifySerializationLocation(String location) throws IOException {
    FileSystem fs = BaseHiveIT.getDefaultFileSystem();
    Path path = new Path(BaseHiveIT.getDefaultFsUri() + location);
    Assert.assertTrue("Location does not exist:" + location, fs.exists(path));
    boolean found = false;
    RemoteIterator<LocatedFileStatus> fsIterator = fs.listFiles(path, false);
    while (!found && fsIterator.hasNext()) {
        LocatedFileStatus status = fsIterator.next();
        LOG.info("Found file: " + status.getPath().getName());
        found = status.getPath().getName().startsWith(AVRO_SCHEMA_SERIALIZATION_PREFIX);
    }
    fs.delete(path, true);
    Assert.assertTrue("Avro schema file not found in the location " + location, found);
}

53. HdfsFileTimestampVersionFinder#mostRecentInDir()

View license
private URI mostRecentInDir(final Path dir, final Pattern pattern) throws IOException {
    final PathFilter filter = new PathFilter() {

        @Override
        public boolean accept(Path path) {
            return pattern == null || pattern.matcher(path.getName()).matches();
        }
    };
    long modifiedTime = Long.MIN_VALUE;
    URI mostRecentURI = null;
    final FileSystem fs = dir.getFileSystem(config);
    for (FileStatus status : fs.listStatus(dir, filter)) {
        if (status.isFile()) {
            final long thisModifiedTime = status.getModificationTime();
            if (thisModifiedTime >= modifiedTime) {
                modifiedTime = thisModifiedTime;
                mostRecentURI = status.getPath().toUri();
            }
        }
    }
    return mostRecentURI;
}

54. CopyToHdfs#main()

Project: flume
Source File: CopyToHdfs.java
View license
public static void main(String[] argv) throws IOException {
    if (argv.length < 2) {
        System.out.println("Need to specify arguments <src> <dst>");
        System.exit(-1);
    }
    Configuration conf = new Configuration();
    Path srcPath = new Path(argv[0]);
    FileSystem srcFs = srcPath.getFileSystem(conf);
    Path dstPath = new Path(argv[1]);
    FileSystem dstFs = dstPath.getFileSystem(conf);
    // dfs.copyFromLocalFile(false, psrc, pdst);
    FileUtil.copy(srcFs, srcPath, dstFs, dstPath, false, conf);
}

55. ImportElementsToAccumulo#run()

View license
@Override
public int run(final String[] strings) throws Exception {
    TableUtils.ensureTableExists(store);
    // Hadoop configuration
    final Configuration conf = getConf();
    final FileSystem fs = FileSystem.get(conf);
    // Remove the _SUCCESS file to prevent warning in accumulo
    fs.delete(new Path(inputPath + "/_SUCCESS"), false);
    // Set all permissions
    IngestUtils.setDirectoryPermsForAccumulo(fs, new Path(inputPath));
    // Import the files
    store.getConnection().tableOperations().importDirectory(store.getProperties().getTable(), inputPath, failurePath, false);
    return SUCCESS_RESPONSE;
}

56. BspCase#getSinglePartFileStatus()

Project: giraph
Source File: BspCase.java
View license
/**
   * Get the single part file status and make sure there is only one part
   *
   * @param conf Configuration to get the file system from
   * @param partDirPath Directory where the single part file should exist
   * @return Single part file status
   * @throws IOException
   */
public static FileStatus getSinglePartFileStatus(Configuration conf, Path partDirPath) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    FileStatus singlePartFileStatus = null;
    int partFiles = 0;
    for (FileStatus fileStatus : fs.listStatus(partDirPath)) {
        if (fileStatus.getPath().getName().equals("part-m-00000")) {
            singlePartFileStatus = fileStatus;
        }
        if (fileStatus.getPath().getName().startsWith("part-m-")) {
            ++partFiles;
        }
    }
    Preconditions.checkState(partFiles == 1, "getSinglePartFile: Part file " + "count should be 1, but is " + partFiles);
    return singlePartFileStatus;
}

57. BspCase#getNumResults()

Project: giraph
Source File: BspCase.java
View license
/**
   * Read all parts- files in the output and count their lines.
   * This works only for textual output!
   *
   * @param conf Configuration
   * @param outputPath Output path
   * @return Number of output lines
   * @throws IOException
   */
public int getNumResults(Configuration conf, Path outputPath) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    int numResults = 0;
    for (FileStatus status : fs.listStatus(outputPath, PARTS_FILTER)) {
        FSDataInputStream in = null;
        BufferedReader reader = null;
        try {
            in = fs.open(status.getPath());
            reader = new BufferedReader(new InputStreamReader(in, Charsets.UTF_8));
            while (reader.readLine() != null) {
                numResults++;
            }
        } finally {
            Closeables.closeQuietly(in);
            Closeables.closeQuietly(reader);
        }
    }
    return numResults;
}

58. HdfsWriter#moveSelectFiles()

Project: gobblin
Source File: HdfsWriter.java
View license
public static void moveSelectFiles(String extension, String source, String destination) throws IOException {
    FileSystem fs = getFileSystem();
    fs.mkdirs(new Path(destination));
    FileStatus[] fileStatuses = fs.listStatus(new Path(source));
    for (FileStatus fileStatus : fileStatuses) {
        Path path = fileStatus.getPath();
        if (!fileStatus.isDirectory() && path.toString().toLowerCase().endsWith(extension.toLowerCase())) {
            HadoopUtils.deleteIfExists(fs, new Path(destination), true);
            HadoopUtils.copyPath(fs, path, fs, new Path(destination), getConfiguration());
        }
    }
}

59. RowLevelQualityCheckerTest#testWriteToErrFile()

View license
@Test(groups = { "ignore" })
public void testWriteToErrFile() throws Exception {
    State state = new State();
    state.setProp(ConfigurationKeys.ROW_LEVEL_POLICY_LIST, "gobblin.qualitychecker.TestRowLevelPolicyFail");
    state.setProp(ConfigurationKeys.ROW_LEVEL_POLICY_LIST_TYPE, "ERR_FILE");
    state.setProp(ConfigurationKeys.ROW_LEVEL_ERR_FILE, TestConstants.TEST_ERR_FILE);
    state.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, TestConstants.TEST_FS_URI);
    RowLevelPolicyChecker checker = new RowLevelPolicyCheckerBuilderFactory().newPolicyCheckerBuilder(state, -1).build();
    RowLevelPolicyCheckResults results = new RowLevelPolicyCheckResults();
    FileReader<GenericRecord> fileReader = openFile(state);
    for (GenericRecord datum : fileReader) {
        Assert.assertFalse(checker.executePolicies(datum, results));
    }
    FileSystem fs = FileSystem.get(new URI(TestConstants.TEST_FS_URI), new Configuration());
    Path outputPath = new Path(TestConstants.TEST_ERR_FILE, state.getProp(ConfigurationKeys.ROW_LEVEL_POLICY_LIST).replaceAll("\\.", "-") + ".err");
    Assert.assertTrue(fs.exists(outputPath));
    fs.delete(new Path(TestConstants.TEST_ERR_FILE), true);
}

60. GobblinYarnAppLauncher#buildLogCopier()

Project: gobblin
Source File: GobblinYarnAppLauncher.java
View license
private LogCopier buildLogCopier(Config config, Path sinkLogDir, Path appWorkDir) throws IOException {
    FileSystem rawLocalFs = this.closer.register(new RawLocalFileSystem());
    rawLocalFs.initialize(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration());
    LogCopier.Builder builder = LogCopier.newBuilder().useSrcFileSystem(this.fs).useDestFileSystem(rawLocalFs).readFrom(getHdfsLogDir(appWorkDir)).writeTo(sinkLogDir).acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR));
    if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE)) {
        builder.useMaxBytesPerLogFile(config.getBytes(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE));
    }
    if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER)) {
        builder.useScheduler(config.getString(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER));
    }
    return builder.build();
}

61. DFSDirTest#control()

Project: hadoop-20
Source File: DFSDirTest.java
View license
public void control(Configuration fsConfig, String fileName) throws IOException {
    String name = fileName;
    FileSystem fs = FileSystem.get(fsConfig);
    fs.delete(new Path(DFS_INPUT, name), true);
    SequenceFile.Writer write = null;
    for (int i = 0; i < nmaps; i++) {
        try {
            Path controlFile = new Path(DFS_INPUT, name + i);
            write = SequenceFile.createWriter(fs, fsConfig, controlFile, Text.class, LongWritable.class, CompressionType.NONE);
            write.append(new Text(name + i), new LongWritable(this.nthreads));
        } finally {
            if (write != null)
                write.close();
            write = null;
        }
    }
}

62. DFSIOTest#control()

Project: hadoop-20
Source File: DFSIOTest.java
View license
public void control(Configuration fsConfig, String fileName) throws IOException {
    String name = fileName;
    FileSystem fs = FileSystem.get(fsConfig);
    fs.delete(new Path(DFS_INPUT, name), true);
    SequenceFile.Writer write = null;
    for (int i = 0; i < nmaps; i++) {
        try {
            Path controlFile = new Path(DFS_INPUT, name + i);
            write = SequenceFile.createWriter(fs, fsConfig, controlFile, Text.class, LongWritable.class, CompressionType.NONE);
            write.append(new Text(name + i), new LongWritable(fileSize));
        } finally {
            if (write != null)
                write.close();
            write = null;
        }
    }
}

63. CoronaJobTrackerRunner#localizeTaskConfiguration()

View license
/**
   * Copies the job file to the working directory of the process that will be
   * started.
   */
@SuppressWarnings("deprecation")
private void localizeTaskConfiguration(TaskTracker tracker, JobConf ttConf, String workDir, Task t, JobID jobID) throws IOException {
    Path jobFile = new Path(t.getJobFile());
    FileSystem systemFS = tracker.systemFS;
    this.localizedJobFile = new Path(workDir, jobID + ".xml");
    LOG.info("Localizing CJT configuration from " + jobFile + " to " + localizedJobFile);
    systemFS.copyToLocalFile(jobFile, localizedJobFile);
    JobConf localJobConf = new JobConf(localizedJobFile);
    boolean modified = Task.saveStaticResolutions(localJobConf);
    if (modified) {
        FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(localizedJobFile.toUri().getPath()));
        try {
            localJobConf.writeXml(out);
        } catch (IOException e) {
            out.close();
            throw e;
        }
    }
    // Add the values from the job conf to the configuration of this runner
    this.conf.addResource(localizedJobFile);
}

64. TestFileQueue#setup()

Project: hadoop-20
Source File: TestFileQueue.java
View license
@BeforeClass
public static void setup() throws IOException {
    final Configuration conf = new Configuration();
    final FileSystem fs = FileSystem.getLocal(conf).getRaw();
    final Path p = new Path(System.getProperty("test.build.data", "/tmp"), "testFileQueue").makeQualified(fs);
    fs.delete(p, true);
    final byte[] b = new byte[BLOCK];
    for (int i = 0; i < NFILES; ++i) {
        Arrays.fill(b, (byte) ('A' + i));
        paths[i] = new Path(p, "" + (char) ('A' + i));
        OutputStream f = null;
        try {
            f = fs.create(paths[i]);
            f.write(b);
        } finally {
            if (f != null) {
                f.close();
            }
        }
    }
}

65. TestBlockCopier#writeExcludesFileAndRefresh()

Project: hadoop-20
Source File: TestBlockCopier.java
View license
private void writeExcludesFileAndRefresh(ArrayList<String> nodes) throws IOException {
    FileSystem fs = FileSystem.getLocal(conf);
    LOG.info(fs);
    // delete if it already exists
    if (fs.exists(excludeFile)) {
        fs.delete(excludeFile, true);
    }
    FSDataOutputStream stm = fs.create(excludeFile);
    if (nodes != null) {
        for (String node : nodes) {
            stm.writeBytes(node);
            stm.writeBytes("\n");
        }
    }
    stm.close();
    namenode.namesystem.refreshNodes(conf);
}

66. FsShell#touchz()

Project: hadoop-20
Source File: FsShell.java
View license
/**
   * (Re)create zero-length file at the specified path.
   * This will be replaced by a more UNIX-like touch when files may be
   * modified.
   */
void touchz(String src) throws IOException {
    Path f = new Path(src);
    FileSystem srcFs = f.getFileSystem(getConf());
    FileStatus st;
    if (srcFs.exists(f)) {
        st = srcFs.getFileStatus(f);
        if (st.isDir()) {
            // TODO: handle this
            throw new IOException(src + " is a directory");
        } else if (st.getLen() != 0)
            throw new IOException(src + " must be a zero-length file");
    }
    FSDataOutputStream out = srcFs.create(f);
    out.close();
}

67. FsShell#copy()

Project: hadoop-20
Source File: FsShell.java
View license
/**
   * Copy files that match the file pattern <i>srcf</i>
   * to a destination file.
   * When copying multiple files, the destination must be a directory.
   * Otherwise, IOException is thrown.
   * @param srcf a file pattern specifying source files
   * @param dstf a destination local file/directory
   * @throws IOException
   * @see org.apache.hadoop.fs.FileSystem#globStatus(Path)
   */
void copy(String srcf, String dstf, Configuration conf) throws IOException {
    Path srcPath = new Path(srcf);
    FileSystem srcFs = srcPath.getFileSystem(getConf());
    Path dstPath = new Path(dstf);
    FileSystem dstFs = dstPath.getFileSystem(getConf());
    Path[] srcs = FileUtil.stat2Paths(srcFs.globStatus(srcPath), srcPath);
    if (srcs.length > 1 && !dstFs.isDirectory(dstPath)) {
        throw new IOException("When copying multiple files, " + "destination should be a directory.");
    }
    for (int i = 0; i < srcs.length; i++) {
        FileUtil.copy(srcFs, srcs[i], dstFs, dstPath, false, conf);
    }
}

68. FsShell#hardlink()

Project: hadoop-20
Source File: FsShell.java
View license
private int hardlink(String argv[]) throws IOException {
    if (argv.length != 3) {
        throw new IllegalArgumentException("Must specify exactly two files to hardlink");
    }
    if (argv[1] == null || argv[2] == null) {
        throw new IllegalArgumentException("One of the arguments is null");
    }
    Path src = new Path(argv[1]);
    Path dst = new Path(argv[2]);
    FileSystem srcFs = src.getFileSystem(getConf());
    FileSystem dstFs = dst.getFileSystem(getConf());
    if (!srcFs.getUri().equals(dstFs.getUri())) {
        throw new IllegalArgumentException("Source and Destination files are" + " on different filesystems");
    }
    return (srcFs.hardLink(src, dst)) ? 0 : -1;
}

69. TestBlockReaderLocal#testFederatedClustersIpcPorts()

Project: hadoop-20
Source File: TestBlockReaderLocal.java
View license
@Test
public void testFederatedClustersIpcPorts() throws IOException {
    String prefix = getClass().getSimpleName() + "-federated-";
    MiniDFSCluster cluster = newFederatedMiniDFSCluster(prefix + random.nextInt(Integer.MAX_VALUE));
    FileSystem fs1 = cluster.getFileSystem(0);
    FileSystem fs2 = cluster.getFileSystem(1);
    try {
        // check that / exists
        assertDir("/", fs1, fs2);
        Path file = new Path("file.dat");
        byte[] fileData1 = writeRandomFile(fs1, file, 0x1234, FILE_SIZE);
        byte[] fileData2 = writeRandomFile(fs2, file, 0xabcd, FILE_SIZE);
        assertFile(fs1, file, FILE_SIZE, fileData1);
        assertFile(fs2, file, FILE_SIZE, fileData2);
    } finally {
        fs1.close();
        cluster.shutdown();
        fs2.close();
    }
}

70. TestFastCopyBadDatanode#testBadDatanodeCrossFS()

View license
@Test
public void testBadDatanodeCrossFS() throws Exception {
    DFSTestUtil util = new DFSTestUtil("testBadDatanodeCrossFS", 3, 1, MAX_FILE_SIZE);
    String topDir = "/testBadDatanodeCrossFS";
    FileSystem fs1 = fileSystems[0];
    FileSystem fs2 = fileSystems[1];
    util.createFiles(fs1, topDir);
    FastCopy fastCopy = new FastCopy(conf);
    cluster.shutdownDataNode(0, true);
    try {
        for (String fileName : util.getFileNames(topDir)) {
            fastCopy.copy(fileName, fileName + "dst", (DistributedFileSystem) fs1, (DistributedFileSystem) fs2);
        }
        Map<DatanodeInfo, Integer> dnErrors = fastCopy.getDatanodeErrors();
        assertEquals(1, dnErrors.size());
        int errors = dnErrors.values().iterator().next();
        assertTrue(errors >= conf.getInt("dfs.fastcopy.max.datanode.errors", 3) + 1);
    } finally {
        fastCopy.shutdown();
    }
}

71. TestFileAppend4#testAppendSyncBbw()

Project: hadoop-20
Source File: TestFileAppend4.java
View license
// test [1 bbw, 0 HDFS block]
public void testAppendSyncBbw() throws Exception {
    LOG.info("START");
    cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs1 = cluster.getFileSystem();
    FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
    try {
        createFile(fs1, "/bbw.test", 1, BBW_SIZE);
        stm.sync();
        // empty before close()
        assertFileSize(fs1, 1);
        loseLeases(fs1);
        recoverFile(fs2);
        // close() should write recovered bbw to HDFS block
        assertFileSize(fs2, BBW_SIZE);
        checkFile(fs2, BBW_SIZE);
    } finally {
        fs2.close();
        fs1.close();
        cluster.shutdown();
    }
    LOG.info("STOP");
}

72. TestFileCorruption#testFileCorruptionHelper()

Project: hadoop-20
Source File: TestFileCorruption.java
View license
private void testFileCorruptionHelper(Configuration conf) throws Exception {
    Path file = new Path(TEST_ROOT_DIR, "corruptFile");
    FileSystem fs = FileSystem.getLocal(conf);
    DataOutputStream dos = fs.create(file);
    dos.writeBytes("original bytes");
    dos.close();
    // Now deliberately corrupt the file
    dos = new DataOutputStream(new FileOutputStream(file.toString()));
    dos.writeBytes("corruption");
    dos.close();
    // Now attempt to read the file
    DataInputStream dis = fs.open(file, 512);
    try {
        System.out.println("A ChecksumException is expected to be logged.");
        dis.readByte();
    } catch (ChecksumException ignore) {
    }
    fs.delete(file, true);
}

73. TestHDFSFileSystemContract#testUniqueFileSystem()

View license
public void testUniqueFileSystem() throws Throwable {
    FileSystem fs1 = cluster.getUniqueFileSystem();
    FileSystem fs2 = cluster.getUniqueFileSystem();
    try {
        DistributedFileSystem dfs1 = DFSUtil.convertToDFS(fs1);
        DistributedFileSystem dfs2 = DFSUtil.convertToDFS(fs2);
        TestCase.assertFalse(dfs1.equals(dfs2));
        String clientName1 = dfs1.dfs.clientName;
        String clientName2 = dfs2.dfs.clientName;
        TestCase.assertFalse(clientName1.equals(clientName2));
        TestCase.assertFalse(clientName1.split("_")[2].equals(clientName2.split("_")[2]));
    } finally {
        fs1.close();
        fs2.close();
    }
}

74. TestMultipleOutputs#setUp()

Project: hadoop-20
Source File: TestMultipleOutputs.java
View license
public void setUp() throws Exception {
    super.setUp();
    Path rootDir = getDir(ROOT_DIR);
    Path inDir = getDir(IN_DIR);
    JobConf conf = createJobConf();
    FileSystem fs = FileSystem.get(conf);
    fs.delete(rootDir, true);
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
}

75. TestKillSubProcesses#runTests()

Project: hadoop-20
Source File: TestKillSubProcesses.java
View license
void runTests(JobConf conf, JobTracker jt) throws IOException {
    FileSystem fs = FileSystem.getLocal(mr.createJobConf());
    Path rootDir = new Path(TEST_ROOT_DIR);
    if (!fs.exists(rootDir)) {
        fs.mkdirs(rootDir);
    }
    fs.setPermission(rootDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    runKillingJobAndValidate(jt, conf);
    runFailingJobAndValidate(jt, conf);
    runSuccessfulJobAndValidate(jt, conf);
}

76. TestLineRecordReader#testCustomRecordDelimiters()

Project: hadoop-20
Source File: TestLineRecordReader.java
View license
/**
   * Test the case when a custom record delimiter is specified using the
   * textinputformat.record.delimiter configuration property
   * 
   * @throws IOException
   * @throws InterruptedException
   * @throws ClassNotFoundException
   */
@Test
public void testCustomRecordDelimiters() throws IOException, InterruptedException, ClassNotFoundException {
    Configuration conf = new Configuration();
    conf.set("textinputformat.record.delimiter", "\t\n");
    FileSystem localFs = FileSystem.getLocal(conf);
    // cleanup
    localFs.delete(workDir, true);
    // creating input test file
    createInputFile(conf);
    createAndRunJob(conf);
    String expected = "0\tabc\ndef\n9\tghi\njkl\n";
    assertEquals(expected, readOutputFile(conf));
}
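
A hedged reading of the expected output above (createInputFile() is not shown, so the input is reconstructed, not quoted): with "\t\n" as the delimiter a record may contain plain newlines, and the leading numbers are the byte offsets of each record.

    // Assumed input, consistent with the offsets 0 and 9 in the expected string:
    String input = "abc\ndef\t\nghi\njkl";
    // record 1: "abc\ndef" at offset 0; record 2: "ghi\njkl" at offset 9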

77. TestLineRecordReader#testDefaultRecordDelimiters()

Project: hadoop-20
Source File: TestLineRecordReader.java
View license
/**
   * Test the default behavior when the textinputformat.record.delimiter
   * configuration property is not specified
   * 
   * @throws IOException
   * @throws InterruptedException
   * @throws ClassNotFoundException
   */
@Test
public void testDefaultRecordDelimiters() throws IOException, InterruptedException, ClassNotFoundException {
    Configuration conf = new Configuration();
    FileSystem localFs = FileSystem.getLocal(conf);
    // cleanup
    localFs.delete(workDir, true);
    // creating input test file
    createInputFile(conf);
    createAndRunJob(conf);
    String expected = "0\tabc\n4\tdef\t\n9\tghi\n13\tjkl\n";
    assertEquals(expected, readOutputFile(conf));
}

78. TestMapRed#printFiles()

Project: hadoop-20
Source File: TestMapRed.java
View license
private static void printFiles(Path dir, Configuration conf) throws IOException {
    FileSystem fs = dir.getFileSystem(conf);
    for (FileStatus f : fs.listStatus(dir)) {
        System.out.println("Reading " + f.getPath() + ": ");
        if (f.isDir()) {
            System.out.println("  it is a map file.");
            printSequenceFile(fs, new Path(f.getPath(), "data"), conf);
        } else if (isSequenceFile(fs, f.getPath())) {
            System.out.println("  it is a sequence file.");
            printSequenceFile(fs, f.getPath(), conf);
        } else {
            System.out.println("  it is a text file.");
            printTextFile(fs, f.getPath());
        }
    }
}
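
isSequenceFile() is defined elsewhere in TestMapRed. As a hedged illustration (not the project's code), such a check typically peeks at the 3-byte "SEQ" magic that every SequenceFile starts with:

    private static boolean looksLikeSequenceFile(FileSystem fs, Path p) throws IOException {
        byte[] magic = new byte[3];
        FSDataInputStream in = fs.open(p);
        try {
            in.readFully(magic);   // throws EOFException for files shorter than the magic
        } finally {
            in.close();
        }
        return magic[0] == 'S' && magic[1] == 'E' && magic[2] == 'Q';
    }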

79. UtilsForTests#writeFile()

Project: hadoop-20
Source File: UtilsForTests.java
View license
static void writeFile(NameNode namenode, Configuration conf, Path name, short replication) throws IOException {
    FileSystem fileSys = FileSystem.get(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, conf, name, BytesWritable.class, BytesWritable.class, CompressionType.NONE);
    writer.append(new BytesWritable(), new BytesWritable());
    writer.close();
    fileSys.setReplication(name, replication);
    DFSTestUtil.waitReplication(fileSys, name, replication);
}
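
A hedged follow-up that could sit at the end of the helper above: once DFSTestUtil.waitReplication() returns, the requested replication can be read back from the file status.

    short actual = fileSys.getFileStatus(name).getReplication();
    assert actual == replication;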

80. TestLocalRunner#createMultiMapsInput()

Project: hadoop-20
Source File: TestLocalRunner.java
View license
/**
   * Create the inputs for the MultiMaps test.
   * @return the path to the input directory.
   */
private Path createMultiMapsInput() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path inputPath = getInputPath();
    // Clear the input directory if it exists, first.
    if (fs.exists(inputPath)) {
        fs.delete(inputPath, true);
    }
    // Create input files, with sizes calibrated based on
    // the amount of work done in each mapper.
    createInputFile(inputPath, 0, 50000);
    createInputFile(inputPath, 1, 500);
    createInputFile(inputPath, 2, 500);
    createInputFile(inputPath, 3, 20);
    createInputFile(inputPath, 4, 5000);
    createInputFile(inputPath, 5, 500);
    return inputPath;
}

81. DumpTypedBytes#run()

Project: hadoop-common
Source File: DumpTypedBytes.java
View license
/**
   * The main driver for <code>DumpTypedBytes</code>.
   */
public int run(String[] args) throws Exception {
    Path pattern = new Path(args[0]);
    FileSystem fs = pattern.getFileSystem(getConf());
    fs.setVerifyChecksum(true);
    for (Path p : FileUtil.stat2Paths(fs.globStatus(pattern), pattern)) {
        List<FileStatus> inputFiles = new ArrayList<FileStatus>();
        FileStatus status = fs.getFileStatus(p);
        if (status.isDir()) {
            FileStatus[] files = fs.listStatus(p);
            Collections.addAll(inputFiles, files);
        } else {
            inputFiles.add(status);
        }
        return dumpTypedBytes(inputFiles);
    }
    return -1;
}
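
Note that the method returns from inside the for loop, so only the first glob match is dumped. The globStatus()/stat2Paths() pair it uses is the standard way to expand a shell-style wildcard into concrete paths; a minimal standalone sketch (the path pattern is illustrative):

    FileStatus[] matches = fs.globStatus(new Path("/data/logs/part-*"));
    for (Path p : FileUtil.stat2Paths(matches)) {
        System.out.println(p);
    }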

82. TestFileCorruption#testLocalFileCorruption()

View license
/** check if local FS can handle corrupted blocks properly */
public void testLocalFileCorruption() throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path(System.getProperty("test.build.data"), "corruptFile");
    FileSystem fs = FileSystem.getLocal(conf);
    DataOutputStream dos = fs.create(file);
    dos.writeBytes("original bytes");
    dos.close();
    // Now deliberately corrupt the file
    dos = new DataOutputStream(new FileOutputStream(file.toString()));
    dos.writeBytes("corruption");
    dos.close();
    // Now attempt to read the file
    DataInputStream dis = fs.open(file, 512);
    try {
        System.out.println("A ChecksumException is expected to be logged.");
        dis.readByte();
    } catch (ChecksumException ignore) {
    }
    fs.delete(file, true);
}

83. TestMultipleOutputs#setUp()

View license
public void setUp() throws Exception {
    super.setUp();
    Path rootDir = getDir(ROOT_DIR);
    Path inDir = getDir(IN_DIR);
    JobConf conf = createJobConf();
    FileSystem fs = FileSystem.get(conf);
    fs.delete(rootDir, true);
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
}

84. TestMapRed#printFiles()

Project: hadoop-common
Source File: TestMapRed.java
View license
private static void printFiles(Path dir, Configuration conf) throws IOException {
    FileSystem fs = dir.getFileSystem(conf);
    for (FileStatus f : fs.listStatus(dir)) {
        System.out.println("Reading " + f.getPath() + ": ");
        if (f.isDir()) {
            System.out.println("  it is a map file.");
            printSequenceFile(fs, new Path(f.getPath(), "data"), conf);
        } else if (isSequenceFile(fs, f.getPath())) {
            System.out.println("  it is a sequence file.");
            printSequenceFile(fs, f.getPath(), conf);
        } else {
            System.out.println("  it is a text file.");
            printTextFile(fs, f.getPath());
        }
    }
}

85. UtilsForTests#writeFile()

Project: hadoop-common
Source File: UtilsForTests.java
View license
static void writeFile(NameNode namenode, Configuration conf, Path name, short replication) throws IOException {
    FileSystem fileSys = FileSystem.get(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, conf, name, BytesWritable.class, BytesWritable.class, CompressionType.NONE);
    writer.append(new BytesWritable(), new BytesWritable());
    writer.close();
    fileSys.setReplication(name, replication);
    DFSTestUtil.waitReplication(fileSys, name, replication);
}

86. TestFileCorruption#testLocalFileCorruption()

Project: hadoop-hdfs
Source File: TestFileCorruption.java
View license
/** check if local FS can handle corrupted blocks properly */
public void testLocalFileCorruption() throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path(System.getProperty("test.build.data"), "corruptFile");
    FileSystem fs = FileSystem.getLocal(conf);
    DataOutputStream dos = fs.create(file);
    dos.writeBytes("original bytes");
    dos.close();
    // Now deliberately corrupt the file
    dos = new DataOutputStream(new FileOutputStream(file.toString()));
    dos.writeBytes("corruption");
    dos.close();
    // Now attempt to read the file
    DataInputStream dis = fs.open(file, 512);
    try {
        System.out.println("A ChecksumException is expected to be logged.");
        dis.readByte();
    } catch (ChecksumException ignore) {
    }
    fs.delete(file, true);
}

87. HiveImport#removeTempLogs()

Project: hadoop-mapreduce
Source File: HiveImport.java
View license
/**
   * If we used a MapReduce-based upload of the data, remove the _logs dir
   * from where we put it, before running Hive LOAD DATA INPATH
   */
private void removeTempLogs(String tableName) throws IOException {
    FileSystem fs = FileSystem.get(configuration);
    String warehouseDir = options.getWarehouseDir();
    Path tablePath;
    if (warehouseDir != null) {
        tablePath = new Path(new Path(warehouseDir), tableName);
    } else {
        tablePath = new Path(tableName);
    }
    Path logsPath = new Path(tablePath, "_logs");
    if (fs.exists(logsPath)) {
        LOG.info("Removing temporary files from import process: " + logsPath);
        if (!fs.delete(logsPath, true)) {
            LOG.warn("Could not delete temporary files; continuing with import, but it may fail.");
        }
    }
}
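
FileSystem.delete() signals failure by returning false rather than throwing, which is why the code above logs a warning and carries on. A hedged sketch of the statement this cleanup prepares for (the exact SQL is generated elsewhere in the importer; shown only to connect the comment above to the path):

    String loadStmt = "LOAD DATA INPATH '" + tablePath + "' INTO TABLE " + tableName + ";";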

88. DirectImportUtils#createHdfsSink()

View license
/**
   * Open a file in HDFS for writing, to hold the data associated with a table.
   * Creates any necessary directories, and returns the OutputStream to write
   * to. The caller is responsible for calling the close() method on the
   * returned stream.
   */
   */
public static SplittableBufferedWriter createHdfsSink(Configuration conf, ImportOptions options, String tableName) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    String warehouseDir = options.getWarehouseDir();
    Path destDir = null;
    if (null != warehouseDir) {
        destDir = new Path(new Path(warehouseDir), tableName);
    } else {
        destDir = new Path(tableName);
    }
    LOG.debug("Writing to filesystem: " + conf.get("fs.default.name"));
    LOG.debug("Creating destination directory " + destDir);
    fs.mkdirs(destDir);
    // This Writer will be closed by the caller.
    return new SplittableBufferedWriter(new SplittingOutputStream(conf, destDir, "data-", options.getDirectSplitSize(), options.shouldUseCompression()));
}
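
As the comment says, the caller owns the returned writer. A hedged usage sketch (the table name and record are made up):

    SplittableBufferedWriter writer = DirectImportUtils.createHdfsSink(conf, options, "employees");
    try {
        writer.write("1,alice\n");
    } finally {
        writer.close();
    }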

89. TestSplittableBufferedWriter#ensureEmptyWriteDir()

View license
/** Create the directory where we'll write our test files, and
   * make sure it has no files in it.
   */
private void ensureEmptyWriteDir() throws IOException {
    FileSystem fs = FileSystem.getLocal(getConf());
    Path writeDir = getWritePath();
    fs.mkdirs(writeDir);
    FileStatus[] stats = fs.listStatus(writeDir);
    for (FileStatus stat : stats) {
        if (stat.isDir()) {
            fail("setUp(): Write directory " + writeDir + " contains subdirectories");
        }
        LOG.debug("setUp(): Removing " + stat.getPath());
        if (!fs.delete(stat.getPath(), false)) {
            fail("setUp(): Could not delete residual file " + stat.getPath());
        }
    }
    if (!fs.exists(writeDir)) {
        fail("setUp: Could not create " + writeDir);
    }
}

90. DumpTypedBytes#run()

View license
/**
   * The main driver for <code>DumpTypedBytes</code>.
   */
public int run(String[] args) throws Exception {
    Path pattern = new Path(args[0]);
    FileSystem fs = pattern.getFileSystem(getConf());
    fs.setVerifyChecksum(true);
    for (Path p : FileUtil.stat2Paths(fs.globStatus(pattern), pattern)) {
        List<FileStatus> inputFiles = new ArrayList<FileStatus>();
        FileStatus status = fs.getFileStatus(p);
        if (status.isDir()) {
            FileStatus[] files = fs.listStatus(p);
            Collections.addAll(inputFiles, files);
        } else {
            inputFiles.add(status);
        }
        return dumpTypedBytes(inputFiles);
    }
    return -1;
}

91. TaskRunner#writeLocalTaskFile()

Project: hadoop-mapreduce
Source File: TaskRunner.java
View license
/**
   * Write the task specific job-configuration file.
   * 
   * @param localFs
   * @throws IOException
   */
private static void writeLocalTaskFile(String jobFile, JobConf conf) throws IOException {
    Path localTaskFile = new Path(jobFile);
    FileSystem localFs = FileSystem.getLocal(conf);
    localFs.delete(localTaskFile, true);
    OutputStream out = localFs.create(localTaskFile);
    try {
        conf.writeXml(out);
    } finally {
        out.close();
    }
}
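
The file written by conf.writeXml() is ordinary Hadoop configuration XML, so it can be loaded back directly. A hedged aside, not part of TaskRunner:

    JobConf reloaded = new JobConf(localTaskFile);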

92. TestMultipleOutputs#setUp()

View license
public void setUp() throws Exception {
    super.setUp();
    Path rootDir = getDir(ROOT_DIR);
    Path inDir = getDir(IN_DIR);
    JobConf conf = createJobConf();
    FileSystem fs = FileSystem.get(conf);
    fs.delete(rootDir, true);
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
}

93. TestKillSubProcesses#runTests()

View license
void runTests(JobConf conf, JobTracker jt) throws IOException {
    FileSystem fs = FileSystem.getLocal(mr.createJobConf());
    Path rootDir = new Path(TEST_ROOT_DIR);
    if (!fs.exists(rootDir)) {
        fs.mkdirs(rootDir);
    }
    fs.setPermission(rootDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    runKillingJobAndValidate(jt, conf);
    runFailingJobAndValidate(jt, conf);
    runSuccessfulJobAndValidate(jt, conf);
}

94. TestMapRed#printFiles()

Project: hadoop-mapreduce
Source File: TestMapRed.java
View license
private static void printFiles(Path dir, Configuration conf) throws IOException {
    FileSystem fs = dir.getFileSystem(conf);
    for (FileStatus f : fs.listStatus(dir)) {
        System.out.println("Reading " + f.getPath() + ": ");
        if (f.isDir()) {
            System.out.println("  it is a map file.");
            printSequenceFile(fs, new Path(f.getPath(), "data"), conf);
        } else if (isSequenceFile(fs, f.getPath())) {
            System.out.println("  it is a sequence file.");
            printSequenceFile(fs, f.getPath(), conf);
        } else {
            System.out.println("  it is a text file.");
            printTextFile(fs, f.getPath());
        }
    }
}

95. UtilsForTests#writeFile()

Project: hadoop-mapreduce
Source File: UtilsForTests.java
View license
static void writeFile(NameNode namenode, Configuration conf, Path name, short replication) throws IOException {
    FileSystem fileSys = FileSystem.get(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, conf, name, BytesWritable.class, BytesWritable.class, CompressionType.NONE);
    writer.append(new BytesWritable(), new BytesWritable());
    writer.close();
    fileSys.setReplication(name, replication);
    DFSTestUtil.waitReplication(fileSys, name, replication);
}

96. TestMultipleInputs#setUp()

View license
@Before
public void setUp() throws Exception {
    super.setUp();
    Path rootDir = getDir(ROOT_DIR);
    Path in1Dir = getDir(IN1_DIR);
    Path in2Dir = getDir(IN2_DIR);
    Configuration conf = createJobConf();
    FileSystem fs = FileSystem.get(conf);
    fs.delete(rootDir, true);
    if (!fs.mkdirs(in1Dir)) {
        throw new IOException("Mkdirs failed to create " + in1Dir.toString());
    }
    if (!fs.mkdirs(in2Dir)) {
        throw new IOException("Mkdirs failed to create " + in2Dir.toString());
    }
}

97. TestMapReduce#printFiles()

Project: hadoop-mapreduce
Source File: TestMapReduce.java
View license
private static void printFiles(Path dir, Configuration conf) throws IOException {
    FileSystem fs = dir.getFileSystem(conf);
    for (FileStatus f : fs.listStatus(dir)) {
        System.out.println("Reading " + f.getPath() + ": ");
        if (f.isDir()) {
            System.out.println("  it is a map file.");
            printSequenceFile(fs, new Path(f.getPath(), "data"), conf);
        } else if (isSequenceFile(fs, f.getPath())) {
            System.out.println("  it is a sequence file.");
            printSequenceFile(fs, f.getPath(), conf);
        } else {
            System.out.println("  it is a text file.");
            printTextFile(fs, f.getPath());
        }
    }
}

98. MasterObserverExample#postCreateTable()

View license
// vv MasterObserverExample
@Override
public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx, HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
    // ^^ MasterObserverExample
    LOG.debug("Got postCreateTable callback");
    // vv MasterObserverExample
    // co MasterObserverExample-1-GetName Get the new table's name from the table descriptor.
    TableName tableName = desc.getTableName();
    // ^^ MasterObserverExample
    LOG.debug("Created table: " + tableName + ", region count: " + regions.length);
    // vv MasterObserverExample
    MasterServices services = ctx.getEnvironment().getMasterServices();
    // co MasterObserverExample-2-Services Get the available services and retrieve a reference to the actual file system.
    MasterFileSystem masterFileSystem = services.getMasterFileSystem();
    FileSystem fileSystem = masterFileSystem.getFileSystem();
    // co MasterObserverExample-3-Path Create a new directory that will store binary data from the client application.
    Path blobPath = new Path(tableName.getQualifierAsString() + "-blobs");
    fileSystem.mkdirs(blobPath);
    // ^^ MasterObserverExample
    LOG.debug("Created " + blobPath + ": " + fileSystem.exists(blobPath));
// vv MasterObserverExample
}
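
A hedged aside on the path used above: blobPath has no leading slash, so mkdirs() resolves it against the file system's working directory. An absolute location would have to be spelled out explicitly, for example (the parent directory is illustrative):

    Path blobPath = new Path("/hbase-blobs", tableName.getQualifierAsString() + "-blobs");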

99. HCatOutputFormat#getRecordWriter()

Project: hcatalog
Source File: HCatOutputFormat.java
View license
/**
     * Get the record writer for the job. Uses the Table's default OutputStorageDriver
     * to get the record writer.
     * @param context the information about the current task.
     * @return a RecordWriter to write the output for the job.
     * @throws IOException
     */
@Override
public RecordWriter<WritableComparable<?>, HCatRecord> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
    // First create the RW.
    HCatRecordWriter rw = new HCatRecordWriter(context);
    // Now set permissions and group on freshly created files.
    OutputJobInfo info = getJobInfo(context);
    Path workFile = rw.getStorageDriver().getWorkFilePath(context, info.getLocation());
    Path tblPath = new Path(info.getTable().getSd().getLocation());
    FileSystem fs = tblPath.getFileSystem(context.getConfiguration());
    FileStatus tblPathStat = fs.getFileStatus(tblPath);
    fs.setPermission(workFile, tblPathStat.getPermission());
    try {
        fs.setOwner(workFile, null, tblPathStat.getGroup());
    } catch (AccessControlException ace) {
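        // best effort: the group change is skipped silently if the caller lacks permission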
    }
    return rw;
}

100. Utils#serialZipfCore()

Project: HiBench
Source File: Utils.java
View license
/**
	 * Steps to make a ZipfCore available to each job.
	 * Client side:
	 * 		1. Zipfian creates its corresponding ZipfCore object
	 * 		2. serialize the ZipfCore into an HDFS file
	 * 		3. share the HDFS file by adding it to the distributed cache
	 * Job side:
	 * 		1. read the object back from the distributed cache file to re-create the ZipfCore
	 * @throws IOException
	 */
private static final void serialZipfCore(Zipfian zipfian, Path hdfs_zipf) throws IOException {
    Utils.checkHdfsPath(hdfs_zipf);
    FileSystem fs = hdfs_zipf.getFileSystem(new Configuration());
    FSDataOutputStream fout = fs.create(hdfs_zipf);
    ObjectOutputStream so = new ObjectOutputStream(fout);
    ZipfCore core = zipfian.createZipfCore();
    so.writeObject(core);
    so.close();
    fout.close();
    fs.close();
}
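
The comment above describes a job-side counterpart that re-creates the ZipfCore from the cached file. A hedged sketch of what that read-back could look like (the real reader lives elsewhere in Utils; the name and local-file assumption are illustrative):

    private static ZipfCore deserialZipfCore(Path cachedFile) throws IOException, ClassNotFoundException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        FSDataInputStream fin = fs.open(cachedFile);
        ObjectInputStream si = new ObjectInputStream(fin);
        try {
            return (ZipfCore) si.readObject();
        } finally {
            si.close();
            fin.close();
        }
    }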