org.apache.hadoop.fs.FileStatus

Here are examples of the Java API class org.apache.hadoop.fs.FileStatus, taken from open source projects.
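As a quick orientation, the sketch below shows the basic pattern that recurs throughout these examples: obtain a FileSystem, list a directory, and read each entry's path, length, and type from its FileStatus. This is a minimal illustration only, not taken from any of the projects below; the path /tmp/example and the default Configuration are placeholder assumptions.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusDemo {

    public static void main(String[] args) throws IOException {
        // Obtain a FileSystem from the default configuration
        // (the local file system unless fs.defaultFS points elsewhere).
        FileSystem fs = FileSystem.get(new Configuration());
        // "/tmp/example" is a placeholder directory used only for illustration.
        Path dir = new Path("/tmp/example");
        // listStatus() returns one FileStatus per child of the directory.
        for (FileStatus status : fs.listStatus(dir)) {
            System.out.printf("%s\t%d bytes\t%s%n",
                status.getPath().getName(),
                status.getLen(),
                status.isDirectory() ? "directory" : "file");
        }
    }
}

Note that many of the older projects below call the deprecated FileStatus.isDir() rather than isDirectory(); both report whether the entry is a directory.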

1. TestFSTableDescriptors#testTableInfoFileStatusComparator()

Project: hindex
Source File: TestFSTableDescriptors.java
@Test
public void testTableInfoFileStatusComparator() {
    FileStatus bare = new FileStatus(0, false, 0, 0, -1, new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME));
    FileStatus future = new FileStatus(0, false, 0, 0, -1, new Path("/tmp/tablinfo." + System.currentTimeMillis()));
    FileStatus farFuture = new FileStatus(0, false, 0, 0, -1, new Path("/tmp/tablinfo." + System.currentTimeMillis() + 1000));
    FileStatus[] alist = { bare, future, farFuture };
    FileStatus[] blist = { bare, farFuture, future };
    FileStatus[] clist = { farFuture, bare, future };
    FSTableDescriptors.FileStatusFileNameComparator c = new FSTableDescriptors.FileStatusFileNameComparator();
    Arrays.sort(alist, c);
    Arrays.sort(blist, c);
    Arrays.sort(clist, c);
    // Now assert all sorted same in way we want.
    for (int i = 0; i < alist.length; i++) {
        assertTrue(alist[i].equals(blist[i]));
        assertTrue(blist[i].equals(clist[i]));
        assertTrue(clist[i].equals(i == 0 ? farFuture : i == 1 ? future : bare));
    }
}

2. TestFSTableDescriptors#testFormatTableInfoSequenceId()

Project: hindex
Source File: TestFSTableDescriptors.java
@Test
public void testFormatTableInfoSequenceId() {
    Path p0 = assertWriteAndReadSequenceid(0);
    // Assert p0 has format we expect.
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
        sb.append("0");
    }
    assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(), p0.getName());
    // Check a few more.
    Path p2 = assertWriteAndReadSequenceid(2);
    Path p10000 = assertWriteAndReadSequenceid(10000);
    // Get a .tablinfo that has no sequenceid suffix.
    Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_NAME);
    FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
    FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
    FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
    FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
    FSTableDescriptors.FileStatusFileNameComparator comparator = new FSTableDescriptors.FileStatusFileNameComparator();
    assertTrue(comparator.compare(fs, fs0) > 0);
    assertTrue(comparator.compare(fs0, fs2) > 0);
    assertTrue(comparator.compare(fs2, fs10000) > 0);
}

3. DFSFileTable#getSizeAndLastModified()

Project: kylin
Source File: DFSFileTable.java
public static Pair<Long, Long> getSizeAndLastModified(String path) throws IOException {
    FileSystem fs = HadoopUtil.getFileSystem(path);
    // get all contained files if path is directory
    ArrayList<FileStatus> allFiles = new ArrayList<>();
    FileStatus status = fs.getFileStatus(new Path(path));
    if (status.isFile()) {
        allFiles.add(status);
    } else {
        FileStatus[] listStatus = fs.listStatus(new Path(path));
        allFiles.addAll(Arrays.asList(listStatus));
    }
    long size = 0;
    long lastModified = 0;
    for (FileStatus file : allFiles) {
        size += file.getLen();
        lastModified = Math.max(lastModified, file.getModificationTime());
    }
    return Pair.newPair(size, lastModified);
}

4. DirectFileIoProcessorRunTest#find()

private List<Path> find(String target) throws IOException {
    FileSystem fs = FileSystem.get(tester.configuration());
    FileStatus[] list = fs.globStatus(getPath(target));
    if (list == null) {
        return Collections.emptyList();
    }
    List<Path> results = new ArrayList<>();
    for (FileStatus file : list) {
        results.add(file.getPath());
    }
    return results;
}

5. HadoopDriver#copyFromHadoop()

Project: asakusafw
Source File: HadoopDriver.java
private void copyFromHadoop(Location location, File targetDirectory) throws IOException {
    targetDirectory.mkdirs();
    logger.info("copy {} to {}", location, targetDirectory);
    Path path = new Path(location.toPath('/'));
    FileSystem fs = path.getFileSystem(configuration);
    FileStatus[] list = fs.globStatus(path);
    if (list == null) {
        throw new IOException(MessageFormat.format("Failed to fs -get: source={0}, destination={1}", path, targetDirectory));
    }
    for (FileStatus status : list) {
        Path p = status.getPath();
        try {
            fs.copyToLocalFile(p, new Path(new File(targetDirectory, p.getName()).toURI()));
        } catch (IOException e) {
            throw new IOException(MessageFormat.format("Failed to fs -get: source={0}, destination={1}", p, targetDirectory), e);
        }
    }
}

6. TemporaryInputPreparator#delete()

static void delete(FileSystem fs, Path target) throws IOException {
    FileStatus[] stats = fs.globStatus(target);
    if (stats == null || stats.length == 0) {
        return;
    }
    for (FileStatus s : stats) {
        Path path = s.getPath();
        //$NON-NLS-1$
        LOG.debug("deleting file: {}", path);
        boolean succeed = fs.delete(path, true);
        //$NON-NLS-1$
        LOG.debug("deleted file (succeed={}): {}", succeed, path);
    }
}

7. SplitInput#splitDirectory()

Project: book
Source File: SplitInput.java
/** Perform a split on the specified directory by calling {@link #splitFile(Path)} on each file found within that
   *  directory.
   */
public void splitDirectory(Path inputDir) throws IOException {
    if (fs.getFileStatus(inputDir) == null) {
        throw new IOException(inputDir + " does not exist");
    } else if (!fs.getFileStatus(inputDir).isDir()) {
        throw new IOException(inputDir + " is not a directory");
    }
    // input dir contains one file per category.
    FileStatus[] fileStats = fs.listStatus(inputDir);
    for (FileStatus inputFile : fileStats) {
        if (!inputFile.isDir()) {
            splitFile(inputFile.getPath());
        }
    }
}

8. HdfsProducerSplitTest#doTest()

Project: camel
Source File: HdfsProducerSplitTest.java
private void doTest(int routeNr) throws Exception {
    if (!canTest()) {
        return;
    }
    for (int i = 0; i < 10; ++i) {
        template.sendBody("direct:start" + routeNr, "CIAO" + i);
    }
    stopCamelContext();
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus[] status = fs.listStatus(new Path("file:///" + BASE_FILE.toUri() + routeNr));
    assertEquals(10, status.length);
    for (FileStatus fileStatus : status) {
        BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(fileStatus.getPath())));
        assertTrue(br.readLine().startsWith("CIAO"));
        assertNull(br.readLine());
    }
}

10. HdfsFileObject#doListChildren()

Project: commons-vfs
Source File: HdfsFileObject.java
/**
     * @see org.apache.commons.vfs2.provider.AbstractFileObject#doListChildren()
     */
@Override
protected String[] doListChildren() throws Exception {
    if (this.doGetType() != FileType.FOLDER) {
        throw new FileNotFolderException(this);
    }
    final FileStatus[] files = this.hdfs.listStatus(this.path);
    final String[] children = new String[files.length];
    int i = 0;
    for (final FileStatus status : files) {
        children[i++] = status.getPath().getName();
    }
    return children;
}

11. ListFiles#listFiles()

Project: Cubert
Source File: ListFiles.java
private void listFiles(FileSystem fs, Path path, List<String> files) throws IOException {
    FileStatus[] allStatus = fs.listStatus(path);
    if (allStatus == null)
        return;
    for (FileStatus status : allStatus) {
        if (status.isDir()) {
            listFiles(fs, status.getPath(), files);
        } else {
            files.add(status.getPath().toUri().getPath());
        }
    }
}

12. AvroStorageUtils#getLast()

Project: Cubert
Source File: AvroStorageUtils.java
/** get last file of a hdfs path if it is  a directory;
     *   or return the file itself if path is a file
     */
public static Path getLast(Path path, FileSystem fs) throws IOException {
    FileStatus status = fs.getFileStatus(path);
    if (!status.isDir()) {
        return path;
    }
    FileStatus[] statuses = fs.listStatus(path, PATH_FILTER);
    if (statuses.length == 0) {
        return null;
    } else {
        Arrays.sort(statuses);
        for (int i = statuses.length - 1; i >= 0; i--) {
            if (!statuses[i].isDir()) {
                return statuses[i].getPath();
            }
        }
        return null;
    }
}

13. FileSystemUtils#getGlobPaths()

Project: Cubert
Source File: FileSystemUtils.java
public static List<Path> getGlobPaths(FileSystem fs, Path path) throws IOException {
    List<Path> paths = new ArrayList<Path>();
    FileStatus[] fileStatus = fs.globStatus(path);
    if (fileStatus == null)
        throw new IOException("Cannot determine paths at " + path.toString());
    for (FileStatus status : fileStatus) {
        paths.add(status.getPath());
    }
    return paths;
}

14. HdfsDestinationPipelineRunIT#getRecordsInTarget()

@Override
protected int getRecordsInTarget() throws IOException {
    int recordsRead = 0;
    DistributedFileSystem fileSystem = miniDFS.getFileSystem();
    FileStatus[] fileStatuses = fileSystem.listStatus(new Path("/tmp/out/" + TestUtil.getCurrentYear()));
    for (FileStatus f : fileStatuses) {
        BufferedReader br = new BufferedReader(new InputStreamReader(fileSystem.open(f.getPath())));
        String line = br.readLine();
        while (line != null) {
            recordsRead++;
            line = br.readLine();
        }
    }
    return recordsRead;
}

15. AbstractPail#readDir()

Project: dfs-datastores
Source File: AbstractPail.java
protected List<String> readDir(String subdir, boolean dir) throws IOException {
    Path absDir;
    if (subdir.length() == 0) {
        absDir = new Path(_instance_root);
    } else {
        absDir = new Path(_instance_root, subdir);
    }
    List<String> ret = new ArrayList<String>();
    FileStatus[] contents = listStatus(absDir);
    for (FileStatus fs : contents) {
        String name = fs.getPath().getName();
        if ((fs.isDir() && dir || !fs.isDir() && !dir) && !name.contains("_")) {
            ret.add(name);
        }
    }
    return ret;
}

16. AbstractPail#getFilesHelper()

Project: dfs-datastores
Source File: AbstractPail.java
private void getFilesHelper(Path abs, String rel, List<String> extensions, boolean stripExtension, List<String> files) throws IOException {
    FileStatus[] contents = listStatus(abs);
    for (FileStatus stat : contents) {
        Path p = stat.getPath();
        if (stat.isDir()) {
            getFilesHelper(p, relify(rel, stat.getPath().getName()), extensions, stripExtension, files);
        } else {
            String filename = relify(rel, stat.getPath().getName());
            for (String extension : extensions) {
                if (filename.endsWith(extension) && stat.getLen() > 0) {
                    String toAdd;
                    if (stripExtension) {
                        toAdd = Utils.stripExtension(filename, extension);
                    } else {
                        toAdd = filename;
                    }
                    files.add(toAdd);
                    break;
                }
            }
        }
    }
}

17. VersionedStoreTest#testCleanup()

public void testCleanup() throws Exception {
    String tmp1 = getTmpPath(fs, "versions");
    VersionedStore vs = new VersionedStore(tmp1);
    for (int i = 1; i <= 4; i++) {
        String version = vs.createVersion(i);
        fs.mkdirs(new Path(version));
        vs.succeedVersion(i);
    }
    FileStatus[] files = fs.listStatus(new Path(tmp1));
    Assert.assertEquals(files.length, 8);
    vs.cleanup(2);
    files = fs.listStatus(new Path(tmp1));
    Assert.assertEquals(files.length, 4);
    for (FileStatus f : files) {
        String path = f.getPath().toString();
        Assert.assertTrue(path.endsWith("3") || path.endsWith("4") || path.endsWith("3.version") || path.endsWith(("4.version")));
    }
}

18. HdfsBlobContainer#listBlobsByPrefix()

@Override
public Map<String, BlobMetaData> listBlobsByPrefix(@Nullable final String prefix) throws IOException {
    FileStatus[] files = store.execute(new Operation<FileStatus[]>() {

        @Override
        public FileStatus[] run(FileContext fileContext) throws IOException {
            return (fileContext.util().listStatus(path, new PathFilter() {

                @Override
                public boolean accept(Path path) {
                    return prefix == null || path.getName().startsWith(prefix);
                }
            }));
        }
    });
    Map<String, BlobMetaData> map = new LinkedHashMap<String, BlobMetaData>();
    for (FileStatus file : files) {
        map.put(file.getPath().getName(), new PlainBlobMetaData(file.getPath().getName(), file.getLen()));
    }
    return Collections.unmodifiableMap(map);
}

19. HdfsUtils#walkPath()

Project: elephant-bird
Source File: HdfsUtils.java
/**
   * Recursively walk a path applying visitor to each path accepted by
   * filter
   *
   * @param path root path to begin walking, will be visited if
   *             it passes the filter and directory flag
   * @param fs FileSystem for this path
   * @param filter filter to determine which paths to accept
   * @param visitor visitor to apply to each accepted path
   * @throws IOException
   */
public static void walkPath(Path path, FileSystem fs, PathFilter filter, PathVisitor visitor) throws IOException {
    FileStatus fileStatus = fs.getFileStatus(path);
    if (filter.accept(path)) {
        visitor.visit(fileStatus);
    }
    if (fileStatus.isDir()) {
        FileStatus[] children = fs.listStatus(path);
        for (FileStatus childStatus : children) {
            walkPath(childStatus.getPath(), fs, filter, visitor);
        }
    }
}

20. FileSystemStorage#discoverInstanceToDelete()

Project: falcon
Source File: FileSystemStorage.java
private List<Path> discoverInstanceToDelete(String inPath, TimeZone timeZone, Date start, FileSystem fs) throws IOException {
    FileStatus[] files = findFilesForFeed(fs, inPath);
    if (files == null || files.length == 0) {
        return Collections.emptyList();
    }
    List<Path> toBeDeleted = new ArrayList<Path>();
    for (FileStatus file : files) {
        Date date = FeedHelper.getDate(inPath, new Path(file.getPath().toUri().getPath()), timeZone);
        LOG.debug("Considering {}", file.getPath().toUri().getPath());
        LOG.debug("Date: {}", date);
        if (date != null && !isDateInRange(date, start)) {
            toBeDeleted.add(file.getPath());
        }
    }
    return toBeDeleted;
}

21. HadoopUtil#getAllFilesHDFS()

Project: falcon
Source File: HadoopUtil.java
/**
     * Retrieves all file names contained in a given directory.
     * @param fs filesystem
     * @param location given directory
     * @return list of file names
     * @throws IOException
     */
public static List<String> getAllFilesHDFS(FileSystem fs, Path location) throws IOException {
    List<String> files = new ArrayList<>();
    if (!fs.exists(location)) {
        return files;
    }
    FileStatus[] stats = fs.listStatus(location);
    for (FileStatus stat : stats) {
        if (!isDir(stat)) {
            files.add(stat.getPath().toString());
        }
    }
    return files;
}

22. HadoopUtil#getAllDirsRecursivelyHDFS()

Project: falcon
Source File: HadoopUtil.java
/**
     * Retrieves all directories within a given depth starting from a specific dir.
     * @param fs filesystem
     * @param location given dir
     * @param depth depth
     * @return all matching directories
     * @throws IOException
     */
public static List<Path> getAllDirsRecursivelyHDFS(FileSystem fs, Path location, int depth) throws IOException {
    List<Path> returnList = new ArrayList<>();
    FileStatus[] stats = fs.listStatus(location);
    for (FileStatus stat : stats) {
        if (isDir(stat)) {
            returnList.add(stat.getPath());
            if (depth > 0) {
                returnList.addAll(getAllDirsRecursivelyHDFS(fs, stat.getPath(), depth - 1));
            }
        }
    }
    return returnList;
}

23. HadoopUtil#getAllFileNamesFromHDFS()

Project: falcon
Source File: HadoopUtil.java
/**
     * Lists all file names for a given directory.
     * @param fs filesystem
     * @param hdfsPath path to a given directory
     * @return list of files which given directory contains
     * @throws IOException
     */
private static List<String> getAllFileNamesFromHDFS(FileSystem fs, String hdfsPath) throws IOException {
    List<String> returnList = new ArrayList<>();
    LOGGER.info("getting file from folder: " + hdfsPath);
    FileStatus[] stats = fs.listStatus(new Path(hdfsPath));
    for (FileStatus stat : stats) {
        // gives directory name
        String currentPath = stat.getPath().toUri().getPath();
        if (!isDir(stat)) {
            returnList.add(currentPath);
        }
    }
    return returnList;
}

24. LateDataHandler#usage()

Project: falcon
Source File: LateDataHandler.java
private long usage(Path inPath, Configuration conf) throws IOException, FalconException {
    FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(inPath.toUri(), conf);
    FileStatus[] fileStatuses = fs.globStatus(inPath);
    if (fileStatuses == null || fileStatuses.length == 0) {
        return 0;
    }
    long totalSize = 0;
    for (FileStatus fileStatus : fileStatuses) {
        totalSize += fs.getContentSummary(fileStatus.getPath()).getLength();
    }
    return totalSize;
}

25. OozieFeedWorkflowBuilderTest#verifyWorkflowUMask()

private void verifyWorkflowUMask(FileSystem fs, COORDINATORAPP coord, String defaultUMask) throws IOException {
    Assert.assertEquals(fs.getConf().get("fs.permissions.umask-mode"), defaultUMask);
    String appPath = coord.getAction().getWorkflow().getAppPath().replace("${nameNode}", "");
    Path wfPath = new Path(appPath);
    FileStatus[] fileStatuses = fs.listStatus(wfPath);
    for (FileStatus fileStatus : fileStatuses) {
        Assert.assertEquals(fileStatus.getOwner(), CurrentUser.getProxyUGI().getShortUserName());
        final FsPermission permission = fileStatus.getPermission();
        if (!fileStatus.isDirectory()) {
            Assert.assertEquals(permission.toString(), HadoopClientFactory.getFileDefaultPermission(fs.getConf()).toString());
        }
    }
}

26. ZooKeeperManager#getServerListFile()

Project: giraph
Source File: ZooKeeperManager.java
/**
   * Make an attempt to get the server list file by looking for a file in
   * the appropriate directory with the prefix
   * ZOOKEEPER_SERVER_LIST_FILE_PREFIX.
   * @return null if not found or the filename if found
   * @throws IOException
   */
private String getServerListFile() throws IOException {
    String serverListFile = null;
    FileStatus[] fileStatusArray = fs.listStatus(baseDirectory);
    for (FileStatus fileStatus : fileStatusArray) {
        if (fileStatus.getPath().getName().startsWith(ZOOKEEPER_SERVER_LIST_FILE_PREFIX)) {
            serverListFile = fileStatus.getPath().getName();
            break;
        }
    }
    return serverListFile;
}

27. BspCase#getSinglePartFileStatus()

Project: giraph
Source File: BspCase.java
/**
   * Get the single part file status and make sure there is only one part
   *
   * @param conf Configuration to get the file system from
   * @param partDirPath Directory where the single part file should exist
   * @return Single part file status
   * @throws IOException
   */
public static FileStatus getSinglePartFileStatus(Configuration conf, Path partDirPath) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    FileStatus singlePartFileStatus = null;
    int partFiles = 0;
    for (FileStatus fileStatus : fs.listStatus(partDirPath)) {
        if (fileStatus.getPath().getName().equals("part-m-00000")) {
            singlePartFileStatus = fileStatus;
        }
        if (fileStatus.getPath().getName().startsWith("part-m-")) {
            ++partFiles;
        }
    }
    Preconditions.checkState(partFiles == 1, "getSinglePartFile: Part file " + "count should be 1, but is " + partFiles);
    return singlePartFileStatus;
}

28. HdfsReader#getFirstDataFilePathInDir()

Project: gobblin
Source File: HdfsReader.java
public static String getFirstDataFilePathInDir(String dirInHdfs) throws IOException {
    FileStatus[] fileStatuses = getFileSystem().listStatus(new Path(dirInHdfs));
    for (FileStatus fileStatus : fileStatuses) {
        Path dataFilePath = fileStatus.getPath();
        if (!fileStatus.isDirectory() && !dataFilePath.getName().startsWith("_")) {
            return dataFilePath.toString();
        }
    }
    String message = dirInHdfs + " does not contain a valid data file.";
    LOG.error(message);
    throw new RuntimeException(message);
}

29. HdfsWriter#moveSelectFiles()

Project: gobblin
Source File: HdfsWriter.java
public static void moveSelectFiles(String extension, String source, String destination) throws IOException {
    FileSystem fs = getFileSystem();
    fs.mkdirs(new Path(destination));
    FileStatus[] fileStatuses = fs.listStatus(new Path(source));
    for (FileStatus fileStatus : fileStatuses) {
        Path path = fileStatus.getPath();
        if (!fileStatus.isDirectory() && path.toString().toLowerCase().endsWith(extension.toLowerCase())) {
            HadoopUtils.deleteIfExists(fs, new Path(destination), true);
            HadoopUtils.copyPath(fs, path, fs, new Path(destination), getConfiguration());
        }
    }
}

30. MRCompactorAvroKeyDedupJobRunner#getNewestSchemaFromSource()

private Schema getNewestSchemaFromSource(Path sourceDir) throws IOException {
    FileStatus[] files = this.fs.listStatus(sourceDir);
    Arrays.sort(files, new LastModifiedDescComparator());
    for (FileStatus status : files) {
        if (status.isDirectory()) {
            Schema schema = getNewestSchemaFromSource(status.getPath());
            if (schema != null)
                return schema;
        } else if (FilenameUtils.isExtension(status.getPath().getName(), AVRO)) {
            return AvroUtils.getSchemaFromDataFile(status.getPath(), this.fs);
        }
    }
    return null;
}

31. SubsetFilesCopyableDatasetFinder#generateDatasetsByIdentifier()

public List<CopyableDataset> generateDatasetsByIdentifier(Path datasetRootDirPath) throws IOException {
    List<CopyableDataset> datasets = Lists.newArrayList();
    FileStatus[] fileStatuses = fs.listStatus(datasetRootDirPath, this.getPathFilter());
    for (FileStatus fileStatus : fileStatuses) {
        Matcher result = this.identifierPattern.matcher(fileStatus.getPath().getName().toString());
        if (result.find()) {
            String id = result.group(1);
            if (idToFileStatuses.containsKey(id)) {
                log.info("Adding " + fileStatus.getPath() + " to " + id);
                idToFileStatuses.get(id).add(fileStatus);
            } else {
                List<FileStatus> entry = new ArrayList<>();
                entry.add(fileStatus);
                log.info("Adding " + fileStatus.getPath() + " to " + id);
                idToFileStatuses.put(id, entry);
            }
        }
    }
    for (String id : idToFileStatuses.keySet()) {
        datasets.add(this.datasetAndPathWithIdentifier(datasetRootDirPath, id));
    }
    return datasets;
}

32. DatasetVersionFinder#findDatasetVersions()

Project: gobblin
Source File: DatasetVersionFinder.java
/**
   * Find dataset versions in the input {@link org.apache.hadoop.fs.Path}. Dataset versions are subdirectories of the
   * input {@link org.apache.hadoop.fs.Path} representing a single manageable unit in the dataset.
   * See {@link gobblin.data.management.retention.DatasetCleaner} for more information.
   *
   * @param dataset {@link org.apache.hadoop.fs.Path} to directory containing all versions of a dataset.
   * @return Map of {@link gobblin.data.management.version.DatasetVersion} and {@link org.apache.hadoop.fs.FileStatus}
   *        for each dataset version found.
   * @throws IOException
   */
@Override
public Collection<T> findDatasetVersions(Dataset dataset) throws IOException {
    FileSystemDataset fsDataset = (FileSystemDataset) dataset;
    Path versionGlobStatus = new Path(fsDataset.datasetRoot(), globVersionPattern());
    FileStatus[] dataSetVersionPaths = this.fs.globStatus(versionGlobStatus);
    List<T> dataSetVersions = Lists.newArrayList();
    for (FileStatus dataSetVersionPath : dataSetVersionPaths) {
        T datasetVersion = getDatasetVersion(PathUtils.relativizePath(dataSetVersionPath.getPath(), fsDataset.datasetRoot()), dataSetVersionPath.getPath());
        if (datasetVersion != null) {
            dataSetVersions.add(datasetVersion);
        }
    }
    return dataSetVersions;
}

33. StateStoreCleaner#run()

Project: gobblin
Source File: StateStoreCleaner.java
/**
   * Run the cleaner.
   * @throws ExecutionException
   */
public void run() throws IOException, ExecutionException {
    FileStatus[] stateStoreDirs = this.fs.listStatus(this.stateStoreRootDir);
    if (stateStoreDirs == null || stateStoreDirs.length == 0) {
        LOGGER.warn("The state store root directory does not exist or is empty");
        return;
    }
    List<Future<?>> futures = Lists.newArrayList();
    for (FileStatus stateStoreDir : stateStoreDirs) {
        futures.add(this.cleanerRunnerExecutor.submit(new CleanerRunner(this.fs, stateStoreDir.getPath(), this.retention, this.retentionTimeUnit)));
    }
    for (Future<?> future : futures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw new ExecutionException("Thread interrupted", e);
        }
    }
    ExecutorsUtils.shutdownExecutorService(cleanerRunnerExecutor, Optional.of(LOGGER), 60, TimeUnit.SECONDS);
}

34. GobblinYarnAppLauncher#addLibJars()

Project: gobblin
Source File: GobblinYarnAppLauncher.java
private void addLibJars(Path srcLibJarDir, Optional<Map<String, LocalResource>> resourceMap, Path destDir) throws IOException {
    FileSystem localFs = FileSystem.getLocal(this.yarnConfiguration);
    FileStatus[] libJarFiles = localFs.listStatus(srcLibJarDir);
    if (libJarFiles == null || libJarFiles.length == 0) {
        return;
    }
    for (FileStatus libJarFile : libJarFiles) {
        Path destFilePath = new Path(destDir, libJarFile.getPath().getName());
        this.fs.copyFromLocalFile(libJarFile.getPath(), destFilePath);
        if (resourceMap.isPresent()) {
            YarnHelixUtils.addFileAsLocalResource(this.fs, destFilePath, LocalResourceType.FILE, resourceMap.get());
        }
    }
}

35. TestAvatarAPI#checkPrimary()

Project: hadoop-20
Source File: TestAvatarAPI.java
private void checkPrimary() throws Exception {
    FileStatus fs = dafs.getFileStatus(path, false);
    FileStatus[] dir = dafs.listStatus(dirPath, false);
    RemoteIterator<Path> cfb = dafs.listCorruptFileBlocks(dirPath, false);
    assertTrue("DAFS file status has the wrong length", fs != null && fs.getLen() == FILE_LEN);
    assertTrue("DAFS directory listing has the wrong length", dir != null && dir.length == 1);
    assertTrue("DAFS expected 0 corrupt file blocks", countPaths(cfb) == 0);
    ContentSummary cs = dafs.getContentSummary(path, false);
    DatanodeInfo[] di = dafs.getDataNodeStats(false);
    assertTrue("DAFS datanode info should contain 3 data nodes", di.length == 3);
}

36. DistributedRaidFileSystem#searchHarDir()

/**
   * search the Har-ed parity files
   */
private boolean searchHarDir(FileStatus stat) throws IOException {
    if (!stat.isDir()) {
        return false;
    }
    String pattern = stat.getPath().toString() + "/*" + RaidNode.HAR_SUFFIX + "*";
    FileStatus[] stats = globStatus(new Path(pattern));
    if (stats != null && stats.length > 0) {
        return true;
    }
    stats = fs.listStatus(stat.getPath());
    // search deeper.
    for (FileStatus status : stats) {
        if (searchHarDir(status)) {
            return true;
        }
    }
    return false;
}

37. PolicyInfo#getSrcPathExpanded()

Project: hadoop-20
Source File: PolicyInfo.java
/**
   * Get the expanded (unglobbed) forms of the srcPaths
   */
public List<Path> getSrcPathExpanded() throws IOException {
    FileSystem fs = srcPath.getFileSystem(conf);
    // globbing on srcPath
    FileStatus[] gpaths = fs.globStatus(srcPath);
    if (gpaths == null) {
        return Collections.emptyList();
    }
    List<Path> results = new ArrayList<Path>(gpaths.length);
    for (FileStatus f : gpaths) {
        results.add(f.getPath().makeQualified(fs));
    }
    return removeConflictPath(results);
}

38. RaidNode#listDirectoryRaidFileStatus()

Project: hadoop-20
Source File: RaidNode.java
public static List<FileStatus> listDirectoryRaidFileStatus(Configuration conf, FileSystem srcFs, Path p) throws IOException {
    long minFileSize = conf.getLong(MINIMUM_RAIDABLE_FILESIZE_KEY, MINIMUM_RAIDABLE_FILESIZE);
    List<FileStatus> lfs = new ArrayList<FileStatus>();
    FileStatus[] files = srcFs.listStatus(p);
    if (null == files) {
        return null;
    }
    for (FileStatus stat : files) {
        if (stat.isDir()) {
            return null;
        }
        // We don't raid too small files
        if (stat.getLen() < minFileSize) {
            continue;
        }
        lfs.add(stat);
    }
    if (lfs.size() == 0)
        return null;
    return lfs;
}

39. TestRaidNode#validateFile()

Project: hadoop-20
Source File: TestRaidNode.java
//
// validates that file matches the crc.
//
private void validateFile(FileSystem fileSys, Path name1, Path name2, long crc) throws IOException {
    FileStatus stat1 = fileSys.getFileStatus(name1);
    FileStatus stat2 = fileSys.getFileStatus(name2);
    assertTrue(" Length of file " + name1 + " is " + stat1.getLen() + " is different from length of file " + name1 + " " + stat2.getLen(), stat1.getLen() == stat2.getLen());
    CRC32 newcrc = new CRC32();
    FSDataInputStream stm = fileSys.open(name2);
    final byte[] b = new byte[4192];
    int num = 0;
    while (num >= 0) {
        num = stm.read(b);
        if (num < 0) {
            break;
        }
        newcrc.update(b, 0, num);
    }
    stm.close();
    if (newcrc.getValue() != crc) {
        fail("CRC mismatch of files " + name1 + " with file " + name2);
    }
}

40. FTPFileSystem#delete()

Project: hadoop-20
Source File: FTPFileSystem.java
/**
   * Convenience method, so that we don't open a new connection when using this
   * method from within another method. Otherwise every API invocation incurs
   * the overhead of opening/closing a TCP connection.
   */
private boolean delete(FTPClient client, Path file, boolean recursive) throws IOException {
    Path workDir = new Path(client.printWorkingDirectory());
    Path absolute = makeAbsolute(workDir, file);
    String pathName = absolute.toUri().getPath();
    FileStatus fileStat = getFileStatus(client, absolute);
    if (!fileStat.isDir()) {
        return client.deleteFile(pathName);
    }
    FileStatus[] dirEntries = listStatus(client, absolute);
    if (dirEntries != null && dirEntries.length > 0 && !(recursive)) {
        throw new IOException("Directory: " + file + " is not empty.");
    }
    if (dirEntries != null) {
        for (int i = 0; i < dirEntries.length; i++) {
            delete(client, new Path(absolute, dirEntries[i].getPath()), recursive);
        }
    }
    return client.removeDirectory(pathName);
}

41. FTPFileSystem#listStatus()

Project: hadoop-20
Source File: FTPFileSystem.java
/**
   * Convenience method, so that we don't open a new connection when using this
   * method from within another method. Otherwise every API invocation incurs
   * the overhead of opening/closing a TCP connection.
   */
private FileStatus[] listStatus(FTPClient client, Path file) throws IOException {
    Path workDir = new Path(client.printWorkingDirectory());
    Path absolute = makeAbsolute(workDir, file);
    FileStatus fileStat = getFileStatus(client, absolute);
    if (!fileStat.isDir()) {
        return new FileStatus[] { fileStat };
    }
    FTPFile[] ftpFiles = client.listFiles(absolute.toUri().getPath());
    FileStatus[] fileStats = new FileStatus[ftpFiles.length];
    for (int i = 0; i < ftpFiles.length; i++) {
        fileStats[i] = getFileStatus(ftpFiles[i], absolute);
    }
    return fileStats;
}

42. FastCopy#expandSingle()

Project: hadoop-20
Source File: FastCopy.java
/**
   * Expand a single file: if it is a file pattern, list out all files matching the
   * pattern; if it is a directory, return all files under the directory.
   *
   * @param src
   *          the file to be expanded
   * @param dstPath
   *          the destination
   * @return the expanded file list for this file/filepattern
   * @throws IOException
   */
private static List<CopyPath> expandSingle(Path src, Path dstPath) throws IOException {
    List<Path> expandedPaths = new ArrayList<Path>();
    FileSystem fs = src.getFileSystem(defaultConf);
    FileStatus[] stats = fs.globStatus(src);
    if (stats == null || stats.length == 0) {
        throw new IOException("Path : " + src + " is invalid");
    }
    for (FileStatus stat : stats) {
        expandedPaths.add(stat.getPath());
    }
    List<CopyPath> expandedDirs = expandDirectories(fs, expandedPaths, dstPath);
    return expandedDirs;
}

43. TestOfflineImageViewer#compareFiles()

// Compare two files as listed in the original namespace FileStatus and
// the output of the ls file from the image processor
private void compareFiles(FileStatusWithHardLink fsh, LsElements elements) {
    FileStatus fs = fsh.stat;
    char type = '-';
    if (fs.isDir()) {
        type = 'd';
    } else if (fsh.hardLinkId != -1) {
        type = 'h';
    }
    assertEquals("file type not equal for : " + fs.getPath(), type, elements.dir);
    assertEquals("perms string equal", fs.getPermission().toString(), elements.perms);
    assertEquals("replication equal", fs.getReplication(), elements.replication);
    assertEquals("owner equal", fs.getOwner(), elements.username);
    assertEquals("group equal", fs.getGroup(), elements.groupname);
    assertEquals("lengths equal", fs.getLen(), elements.filesize);
}

44. HadoopArchives#findFirstAvailablePartId()

Project: hadoop-20
Source File: HadoopArchives.java
private int findFirstAvailablePartId(Path archivePath) throws IOException {
    FileSystem fs = archivePath.getFileSystem(conf);
    FileStatus[] fileStatuses = fs.listStatus(archivePath);
    int result = 0;
    for (FileStatus fileStatus : fileStatuses) {
        String name = fileStatus.getPath().getName();
        if (name.startsWith(PART_PREFIX)) {
            int id = Integer.parseInt(name.substring(PART_PREFIX.length()));
            result = Math.max(result, id + 1);
        }
    }
    return result;
}

45. TextIOJobBuilder#verifyResults()

Project: hadoop-book
Source File: TextIOJobBuilder.java
public TextIOJobBuilder verifyResults() throws IOException {
    FileStatus[] outputFiles = fs.listStatus(outputPath, new PathFilter() {

        @Override
        public boolean accept(Path path) {
            return path.getName().startsWith("part");
        }
    });
    int i = 0;
    for (FileStatus file : outputFiles) {
        List<String> actualLines = readLines(fs, file.getPath());
        assertTrue(actualLines.size() <= expectedOutputs.size() - i);
        for (String actualLine : actualLines) {
            String expectedLine = expectedOutputs.get(i++);
            assertEquals(expectedLine, actualLine);
        }
    }
    return this;
}

46. FTPFileSystem#delete()

Project: hadoop-common
Source File: FTPFileSystem.java
/**
   * Convenience method, so that we don't open a new connection when using this
   * method from within another method. Otherwise every API invocation incurs
   * the overhead of opening/closing a TCP connection.
   */
private boolean delete(FTPClient client, Path file, boolean recursive) throws IOException {
    Path workDir = new Path(client.printWorkingDirectory());
    Path absolute = makeAbsolute(workDir, file);
    String pathName = absolute.toUri().getPath();
    FileStatus fileStat = getFileStatus(client, absolute);
    if (!fileStat.isDir()) {
        return client.deleteFile(pathName);
    }
    FileStatus[] dirEntries = listStatus(client, absolute);
    if (dirEntries != null && dirEntries.length > 0 && !(recursive)) {
        throw new IOException("Directory: " + file + " is not empty.");
    }
    if (dirEntries != null) {
        for (int i = 0; i < dirEntries.length; i++) {
            delete(client, new Path(absolute, dirEntries[i].getPath()), recursive);
        }
    }
    return client.removeDirectory(pathName);
}

47. FTPFileSystem#listStatus()

Project: hadoop-common
Source File: FTPFileSystem.java
/**
   * Convenience method, so that we don't open a new connection when using this
   * method from within another method. Otherwise every API invocation incurs
   * the overhead of opening/closing a TCP connection.
   */
private FileStatus[] listStatus(FTPClient client, Path file) throws IOException {
    Path workDir = new Path(client.printWorkingDirectory());
    Path absolute = makeAbsolute(workDir, file);
    FileStatus fileStat = getFileStatus(client, absolute);
    if (!fileStat.isDir()) {
        return new FileStatus[] { fileStat };
    }
    FTPFile[] ftpFiles = client.listFiles(absolute.toUri().getPath());
    FileStatus[] fileStats = new FileStatus[ftpFiles.length];
    for (int i = 0; i < ftpFiles.length; i++) {
        fileStats[i] = getFileStatus(ftpFiles[i], absolute);
    }
    return fileStats;
}

48. TestSplittableBufferedWriter#ensureEmptyWriteDir()

/** Create the directory where we'll write our test files to; and
   * make sure it has no files in it.
   */
private void ensureEmptyWriteDir() throws IOException {
    FileSystem fs = FileSystem.getLocal(getConf());
    Path writeDir = getWritePath();
    fs.mkdirs(writeDir);
    FileStatus[] stats = fs.listStatus(writeDir);
    for (FileStatus stat : stats) {
        if (stat.isDir()) {
            fail("setUp(): Write directory " + writeDir + " contains subdirectories");
        }
        LOG.debug("setUp(): Removing " + stat.getPath());
        if (!fs.delete(stat.getPath(), false)) {
            fail("setUp(): Could not delete residual file " + stat.getPath());
        }
    }
    if (!fs.exists(writeDir)) {
        fail("setUp: Could not create " + writeDir);
    }
}

49. TestMultiMaps#getDataFilePaths()

Project: hadoop-mapreduce
Source File: TestMultiMaps.java
/** @return a list of Path objects for each data file */
protected List<Path> getDataFilePaths() throws IOException {
    List<Path> paths = new ArrayList<Path>();
    Configuration conf = new Configuration();
    conf.set("fs.default.name", "file:///");
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] stats = fs.listStatus(getTablePath(), new Utils.OutputFileUtils.OutputFilesFilter());
    for (FileStatus stat : stats) {
        paths.add(stat.getPath());
    }
    return paths;
}

50. HFile#getStoreFiles()

Project: hindex
Source File: HFile.java
/**
   * Returns all files belonging to the given region directory. Could return an
   * empty list.
   *
   * @param fs  The file system reference.
   * @param regionDir  The region directory to scan.
   * @return The list of files found.
   * @throws IOException When scanning the files fails.
   */
static List<Path> getStoreFiles(FileSystem fs, Path regionDir) throws IOException {
    List<Path> res = new ArrayList<Path>();
    PathFilter dirFilter = new FSUtils.DirFilter(fs);
    FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter);
    for (FileStatus dir : familyDirs) {
        FileStatus[] files = fs.listStatus(dir.getPath());
        for (FileStatus file : files) {
            if (!file.isDir()) {
                res.add(file.getPath());
            }
        }
    }
    return res;
}

51. RestoreSnapshotHelper#getTableRegionFamilyFiles()

Project: hindex
Source File: RestoreSnapshotHelper.java
/**
   * @return The set of files in the specified family directory.
   */
private Set<String> getTableRegionFamilyFiles(final Path familyDir) throws IOException {
    Set<String> familyFiles = new HashSet<String>();
    FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir);
    if (hfiles == null)
        return familyFiles;
    for (FileStatus hfileRef : hfiles) {
        String hfileName = hfileRef.getPath().getName();
        familyFiles.add(hfileName);
    }
    return familyFiles;
}

52. RestoreSnapshotHelper#getTableRegions()

Project: hindex
Source File: RestoreSnapshotHelper.java
/**
   * @return the set of the regions contained in the table
   */
private List<HRegionInfo> getTableRegions() throws IOException {
    LOG.debug("get table regions: " + tableDir);
    FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
    if (regionDirs == null)
        return null;
    List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
    for (FileStatus regionDir : regionDirs) {
        HRegionInfo hri = HRegion.loadDotRegionInfoFileContent(fs, regionDir.getPath());
        regions.add(hri);
    }
    LOG.debug("found " + regions.size() + " regions for table=" + tableDesc.getNameAsString());
    return regions;
}

53. TakeSnapshotUtils#getMapOfServersAndLogs()

Project: hindex
Source File: TakeSnapshotUtils.java
/**
   * @param logdir
   * @param toInclude list of servers to include. If empty or null, returns all servers
   * @return maps of servers to all their log files. If there is no log directory, returns
   *         <tt>null</tt>
   */
private static Multimap<String, String> getMapOfServersAndLogs(FileSystem fs, Path logdir, Collection<String> toInclude) throws IOException {
    // create a path filter based on the passed directories to include
    PathFilter filter = toInclude == null || toInclude.size() == 0 ? null : new MatchesDirectoryNames(toInclude);
    // get all the expected directories
    FileStatus[] serverLogDirs = FSUtils.listStatus(fs, logdir, filter);
    if (serverLogDirs == null)
        return null;
    // map those into a multimap of servername -> [log files]
    Multimap<String, String> map = HashMultimap.create();
    for (FileStatus server : serverLogDirs) {
        FileStatus[] serverLogs = FSUtils.listStatus(fs, server.getPath(), null);
        if (serverLogs == null)
            continue;
        for (FileStatus log : serverLogs) {
            map.put(server.getPath().getName(), log.getPath().getName());
        }
    }
    return map;
}

54. FSUtils#getTableDirs()

Project: hindex
Source File: FSUtils.java
/**
   * @param fs
   * @param rootdir
   * @return All the table directories under <code>rootdir</code>. Ignore non table hbase folders such as
   * .logs, .oldlogs, .corrupt, .META., and -ROOT- folders.
   * @throws IOException
   */
public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir) throws IOException {
    // presumes any directory under hbase.rootdir is a table
    FileStatus[] dirs = fs.listStatus(rootdir, new DirFilter(fs));
    List<Path> tabledirs = new ArrayList<Path>(dirs.length);
    for (FileStatus dir : dirs) {
        Path p = dir.getPath();
        String tableName = p.getName();
        if (!HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName)) {
            tabledirs.add(p);
        }
    }
    return tabledirs;
}

55. FSUtils#logFSTree()

Project: hindex
Source File: FSUtils.java
/**
   * Recursive helper to log the state of the FS
   * @see #logFileSystemState(FileSystem, Path, Log)
   */
private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix) throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, root, null);
    if (files == null)
        return;
    for (FileStatus file : files) {
        if (file.isDir()) {
            LOG.debug(prefix + file.getPath().getName() + "/");
            logFSTree(LOG, fs, file.getPath(), prefix + "---");
        } else {
            LOG.debug(prefix + file.getPath().getName());
        }
    }
}

56. FSVisitor#visitTableStoreFiles()

Project: hindex
Source File: FSVisitor.java
/**
   * Iterate over the table store files
   *
   * @param fs {@link FileSystem}
   * @param tableDir {@link Path} to the table directory
   * @param visitor callback object to get the store files
   * @throws IOException if an error occurred while scanning the directory
   */
public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir, final StoreFileVisitor visitor) throws IOException {
    FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
    if (regions == null) {
        LOG.info("No regions under directory:" + tableDir);
        return;
    }
    for (FileStatus region : regions) {
        visitRegionStoreFiles(fs, region.getPath(), visitor);
    }
}

57. FSVisitor#visitTableRecoveredEdits()

Project: hindex
Source File: FSVisitor.java
/**
   * Iterate over each region in the table and inform about recovered.edits
   *
   * @param fs {@link FileSystem}
   * @param tableDir {@link Path} to the table directory
   * @param visitor callback object to get the recovered.edits files
   * @throws IOException if an error occurred while scanning the directory
   */
public static void visitTableRecoveredEdits(final FileSystem fs, final Path tableDir, final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
    FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
    if (regions == null) {
        LOG.info("No regions under directory:" + tableDir);
        return;
    }
    for (FileStatus region : regions) {
        visitRegionRecoveredEdits(fs, region.getPath(), visitor);
    }
}

58. FSVisitor#visitLogFiles()

Project: hindex
Source File: FSVisitor.java
/**
   * Iterate over hbase log files
   *
   * @param fs {@link FileSystem}
   * @param rootDir {@link Path} to the HBase root folder
   * @param visitor callback object to get the log files
   * @throws IOException if an error occurred while scanning the directory
   */
public static void visitLogFiles(final FileSystem fs, final Path rootDir, final LogFileVisitor visitor) throws IOException {
    Path logsDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
    FileStatus[] logServerDirs = FSUtils.listStatus(fs, logsDir);
    if (logServerDirs == null) {
        LOG.info("No logs under directory:" + logsDir);
        return;
    }
    for (FileStatus serverLogs : logServerDirs) {
        String serverName = serverLogs.getPath().getName();
        FileStatus[] hlogs = FSUtils.listStatus(fs, serverLogs.getPath());
        if (hlogs == null) {
            LOG.debug("No hfiles found for server: " + serverName + ", skipping.");
            continue;
        }
        for (FileStatus hlogRef : hlogs) {
            visitor.logFile(serverName, hlogRef.getPath().getName());
        }
    }
}

59. HBaseFsck#preCheckPermission()

Project: hindex
Source File: HBaseFsck.java
private void preCheckPermission() throws IOException, AccessControlException {
    if (shouldIgnorePreCheckPermission()) {
        return;
    }
    Path hbaseDir = new Path(getConf().get(HConstants.HBASE_DIR));
    FileSystem fs = hbaseDir.getFileSystem(getConf());
    User user = User.getCurrent();
    FileStatus[] files = fs.listStatus(hbaseDir);
    for (FileStatus file : files) {
        try {
            FSUtils.checkAccess(user, file, FsAction.WRITE);
        } catch (AccessControlException ace) {
            LOG.warn("Got AccessControlException when preCheckPermission ", ace);
            errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + user.getShortName() + " does not have write perms to " + file.getPath() + ". Please rerun hbck as hdfs user " + file.getOwner());
            throw new AccessControlException(ace);
        }
    }
}

60. HFileCorruptionChecker#checkColFamDir()

Project: hindex
Source File: HFileCorruptionChecker.java
/**
   * Check all files in a column family dir.
   *
   * @param cfDir
   *          column family directory
   * @throws IOException
   */
protected void checkColFamDir(Path cfDir) throws IOException {
    FileStatus[] hfs = null;
    try {
        // use same filter as scanner.
        hfs = fs.listStatus(cfDir, new HFileFilter(fs));
    } catch (FileNotFoundException fnfe) {
        LOG.warn("Colfam Directory " + cfDir + " does not exist.  Likely due to concurrent split/compaction. Skipping.");
        missing.add(cfDir);
        return;
    }
    // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
    if (hfs.length == 0 && !fs.exists(cfDir)) {
        LOG.warn("Colfam Directory " + cfDir + " does not exist.  Likely due to concurrent split/compaction. Skipping.");
        missing.add(cfDir);
        return;
    }
    for (FileStatus hfFs : hfs) {
        Path hf = hfFs.getPath();
        checkHFile(hf);
    }
}

61. HFileCorruptionChecker#checkRegionDir()

Project: hindex
Source File: HFileCorruptionChecker.java
/**
   * Check all column families in a region dir.
   *
   * @param regionDir
   *          region directory
   * @throws IOException
   */
protected void checkRegionDir(Path regionDir) throws IOException {
    FileStatus[] cfs = null;
    try {
        cfs = fs.listStatus(regionDir, new FamilyDirFilter(fs));
    } catch (FileNotFoundException fnfe) {
        LOG.warn("Region Directory " + regionDir + " does not exist.  Likely due to concurrent split/compaction. Skipping.");
        missing.add(regionDir);
        return;
    }
    // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
    if (cfs.length == 0 && !fs.exists(regionDir)) {
        LOG.warn("Region Directory " + regionDir + " does not exist.  Likely due to concurrent split/compaction. Skipping.");
        missing.add(regionDir);
        return;
    }
    for (FileStatus cfFs : cfs) {
        Path cfDir = cfFs.getPath();
        checkColFamDir(cfDir);
    }
}

62. TestSnapshotFromMaster#getArchivedHFiles()

Project: hindex
Source File: TestSnapshotFromMaster.java
/**
   * @return all the HFiles for a given table that have been archived
   * @throws IOException on expected failure
   */
private final Collection<String> getArchivedHFiles(Path archiveDir, Path rootDir, FileSystem fs, String tableName) throws IOException {
    Path tableArchive = new Path(archiveDir, tableName);
    FileStatus[] archivedHFiles = SnapshotTestingUtils.listHFiles(fs, tableArchive);
    List<String> files = new ArrayList<String>(archivedHFiles.length);
    LOG.debug("Have archived hfiles: " + tableArchive);
    for (FileStatus file : archivedHFiles) {
        LOG.debug(file.getPath());
        files.add(file.getPath().getName());
    }
    // sort the archived files
    Collections.sort(files);
    return files;
}

63. TestStore#getLowestTimeStampFromFS()

Project: hindex
Source File: TestStore.java
View license
private static long getLowestTimeStampFromFS(FileSystem fs, final List<StoreFile> candidates) throws IOException {
    long minTs = Long.MAX_VALUE;
    if (candidates.isEmpty()) {
        return minTs;
    }
    Path[] p = new Path[candidates.size()];
    for (int i = 0; i < candidates.size(); ++i) {
        p[i] = candidates.get(i).getPath();
    }
    FileStatus[] stats = fs.listStatus(p);
    if (stats == null || stats.length == 0) {
        return minTs;
    }
    for (FileStatus s : stats) {
        minTs = Math.min(minTs, s.getModificationTime());
    }
    return minTs;
}

64. TestHLogSplit#setUp()

Project: hindex
Source File: TestHLogSplit.java
@Before
public void setUp() throws Exception {
    flushToConsole("Cleaning up cluster for new test\n" + "--------------------------");
    conf = TEST_UTIL.getConfiguration();
    fs = TEST_UTIL.getDFSCluster().getFileSystem();
    FileStatus[] entries = fs.listStatus(new Path("/"));
    flushToConsole("Num entries in /:" + entries.length);
    for (FileStatus dir : entries) {
        assertTrue("Deleting " + dir.getPath(), fs.delete(dir.getPath(), true));
    }
    // create the HLog directory because recursive log creates are not allowed
    fs.mkdirs(hlogDir);
    seq = 0;
    regions = new ArrayList<String>();
    Collections.addAll(regions, "bbb", "ccc");
    InstrumentedSequenceFileLogWriter.activateFailure = false;
}

65. TestArchive#browseDir()

Project: HiTune
Source File: TestArchive.java
View license
public void browseDir(FileSystem fs, Path p, int d) throws IOException {
    for (int i = 0; i < d; ++i) {
        System.out.print(" |");
    }
    FileStatus stat = fs.getFileStatus(p);
    if (stat.isDir()) {
        System.out.println(" \\ " + p.getName());
        FileStatus[] files = fs.listStatus(p);
        for (FileStatus f : files) {
            browseDir(fs, f.getPath(), d + 1);
        }
    } else
        System.out.println(p.getName());
}

66. FileLister#traverseDirs()

Project: hraven
Source File: FileLister.java
/**
   * Recursively traverses the dirs to get the list of
   * files for a given path filtered as per the input path range filter
   *
   */
private static void traverseDirs(List<FileStatus> fileStatusesList, FileSystem hdfs, Path inputPath, JobFileModifiedRangePathFilter jobFileModifiedRangePathFilter) throws IOException {
    // get all the files and dirs in the current dir
    FileStatus allFiles[] = hdfs.listStatus(inputPath);
    for (FileStatus aFile : allFiles) {
        if (aFile.isDir()) {
            //recurse here
            traverseDirs(fileStatusesList, hdfs, aFile.getPath(), jobFileModifiedRangePathFilter);
        } else {
            // check if the pathFilter is accepted for this file
            if (jobFileModifiedRangePathFilter.accept(aFile.getPath())) {
                fileStatusesList.add(aFile);
            }
        }
    }
}

67. ApplyTfCSVMR#deletePartFiles()

private static void deletePartFiles(FileSystem fs, Path path) throws FileNotFoundException, IOException {
    PathFilter filter = new PathFilter() {

        public boolean accept(Path file) {
            return file.getName().startsWith("part-");
        }
    };
    FileStatus[] list = fs.listStatus(path, filter);
    for (FileStatus stat : list) {
        fs.delete(stat.getPath(), false);
    }
}

68. MapReduceTool#isFileEmpty()

public static boolean isFileEmpty(FileSystem fs, String dir) throws IOException {
    Path pth = new Path(dir);
    FileStatus fstat = fs.getFileStatus(pth);
    if (fstat.isDirectory()) {
        // it is a directory
        FileStatus[] stats = fs.listStatus(pth);
        if (stats != null) {
            for (FileStatus stat : stats) {
                if (stat.getLen() > 0)
                    return false;
            }
            return true;
        } else {
            return true;
        }
    } else {
        // it is a regular file
        if (fstat.getLen() == 0)
            return true;
        else
            return false;
    }
}

69. MapReduceTool#getSubDirs()

View license
/**
	 * Returns a comma-separated list of the paths directly under the given directory.
	 *
	 * @param dir directory to list
	 * @return comma-separated child paths
	 * @throws IOException if the directory cannot be listed
	 */
public static String getSubDirs(String dir) throws IOException {
    FileSystem fs = FileSystem.get(_rJob);
    FileStatus[] files = fs.listStatus(new Path(dir));
    StringBuilder sb = new StringBuilder();
    for (FileStatus file : files) {
        if (sb.length() > 0)
            sb.append(",");
        sb.append(file.getPath().toString());
    }
    return sb.toString();
}
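
With Java 8 streams the same comma-joined listing can be written more compactly. This is only an equivalent sketch of the loop above, not the original class's API; the Configuration is passed explicitly here instead of the _rJob field used above.

import java.io.IOException;
import java.util.Arrays;
import java.util.stream.Collectors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SubDirListing {

    /** Joins the paths directly under dir into a single comma-separated string. */
    public static String getSubDirs(Configuration conf, String dir) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        return Arrays.stream(fs.listStatus(new Path(dir)))
                .map(file -> file.getPath().toString())
                .collect(Collectors.joining(","));
    }
}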

70. MapReduceTool#getSubDirsIgnoreLogs()

View license
/**
	 * Returns a comma-separated list of the paths directly under the given directory,
	 * skipping any entry whose path contains "_logs".
	 *
	 * @param dir directory to list
	 * @return comma-separated child paths, excluding _logs entries
	 * @throws IOException if the directory cannot be listed
	 */
public static String getSubDirsIgnoreLogs(String dir) throws IOException {
    FileSystem fs = FileSystem.get(_rJob);
    FileStatus[] files = fs.listStatus(new Path(dir));
    StringBuilder sb = new StringBuilder();
    for (FileStatus file : files) {
        String name = file.getPath().toString();
        if (name.contains("_logs"))
            continue;
        if (sb.length() > 0)
            sb.append(",");
        sb.append(name);
    }
    return sb.toString();
}

71. HDFSTransactionStateStorage#deleteLogsOlderThan()

View license
@Override
public void deleteLogsOlderThan(long timestamp) throws IOException {
    FileStatus[] statuses = fs.listStatus(snapshotDir, new LogFileFilter(0, timestamp));
    int removedCnt = 0;
    for (FileStatus status : statuses) {
        LOG.debug("Removing old transaction log {}", status.getPath());
        if (fs.delete(status.getPath(), false)) {
            removedCnt++;
        } else {
            LOG.error("Failed to delete transaction log file {}", status.getPath());
        }
    }
    LOG.debug("Removed {} transaction logs older than {}", removedCnt, timestamp);
}

72. TestKaaHdfsSink#readAndCheckResultsFromHdfs()

Project: kaa
Source File: TestKaaHdfsSink.java
View license
private void readAndCheckResultsFromHdfs(RecordHeader header, List<TestLogData> testLogs) throws IOException {
    Path logsPath = new Path("/logs" + Path.SEPARATOR + applicationToken + Path.SEPARATOR + logSchemaVersion + Path.SEPARATOR + "data*");
    FileStatus[] statuses = fileSystem.globStatus(logsPath);
    List<TestLogData> resultTestLogs = new ArrayList<>();
    Schema wrapperSchema = RecordWrapperSchemaGenerator.generateRecordWrapperSchema(TestLogData.getClassSchema().toString());
    for (FileStatus status : statuses) {
        FileReader<GenericRecord> fileReader = null;
        try {
            SeekableInput input = new FsInput(status.getPath(), fileSystem.getConf());
            DatumReader<GenericRecord> datumReader = new SpecificDatumReader<>(wrapperSchema);
            fileReader = DataFileReader.openReader(input, datumReader);
            for (GenericRecord record : fileReader) {
                RecordHeader recordHeader = (RecordHeader) record.get(RecordWrapperSchemaGenerator.RECORD_HEADER_FIELD);
                Assert.assertEquals(header, recordHeader);
                TestLogData recordData = (TestLogData) record.get(RecordWrapperSchemaGenerator.RECORD_DATA_FIELD);
                resultTestLogs.add(recordData);
            }
        } finally {
            IOUtils.closeQuietly(fileReader);
        }
    }
    Assert.assertEquals(testLogs, resultTestLogs);
}

73. FileSystemUtil#visit()

Project: kite
Source File: FileSystemUtil.java
View license
private static <T> T visit(PathVisitor<T> visitor, FileSystem fs, Path path, List<Path> followedLinks) throws IOException {
    if (fs.isFile(path)) {
        return visitor.file(fs, path);
    } else if (IS_SYMLINK != null && IS_SYMLINK.<Boolean>invoke(fs.getFileStatus(path))) {
        Preconditions.checkArgument(!followedLinks.contains(path), "Encountered recursive path structure at link: " + path);
        // no need to remove the link afterwards; leaving it in followedLinks prevents revisiting it
        followedLinks.add(path);
        return visit(visitor, fs, fs.getLinkTarget(path), followedLinks);
    }
    List<T> children = Lists.newArrayList();
    FileStatus[] statuses = fs.listStatus(path, PathFilters.notHidden());
    for (FileStatus stat : statuses) {
        children.add(visit(visitor, fs, stat.getPath()));
    }
    return visitor.directory(fs, path, children);
}

74. DeployCoprocessorCLI#getNewestCoprocessorJar()

Project: kylin
Source File: DeployCoprocessorCLI.java
View license
public static Path getNewestCoprocessorJar(KylinConfig config, FileSystem fileSystem) throws IOException {
    Path coprocessorDir = getCoprocessorHDFSDir(fileSystem, config);
    FileStatus newestJar = null;
    for (FileStatus fileStatus : fileSystem.listStatus(coprocessorDir)) {
        if (fileStatus.getPath().toString().endsWith(".jar")) {
            if (newestJar == null) {
                newestJar = fileStatus;
            } else {
                if (newestJar.getModificationTime() < fileStatus.getModificationTime())
                    newestJar = fileStatus;
            }
        }
    }
    if (newestJar == null)
        return null;
    Path path = newestJar.getPath().makeQualified(fileSystem.getUri(), null);
    logger.info("The newest coprocessor is " + path.toString());
    return path;
}
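
The newest-by-modification-time scan can also be phrased as a max over a comparator. The sketch below is a hedged Java 8 equivalent, not Kylin's API; it passes the working directory to makeQualified where the CLI above passes null.

import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Optional;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NewestJarFinder {

    /** Returns the most recently modified .jar directly under dir, or null when none exists. */
    public static Path findNewestJar(FileSystem fs, Path dir) throws IOException {
        Optional<FileStatus> newest = Arrays.stream(fs.listStatus(dir))
                .filter(status -> status.getPath().getName().endsWith(".jar"))
                .max(Comparator.comparingLong(FileStatus::getModificationTime));
        // makeQualified resolves a relative or scheme-less path against the FileSystem's URI
        return newest
                .map(status -> status.getPath().makeQualified(fs.getUri(), fs.getWorkingDirectory()))
                .orElse(null);
    }
}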

75. QueryResultPurger#purgePaths()

Project: lens
Source File: QueryResultPurger.java
View license
public void purgePaths(Path path, DateUtil.TimeDiff retention, boolean purgeDirectory) throws IOException {
    int counter = 0;
    FileSystem fs = path.getFileSystem(conf);
    FileStatus[] fileList = fs.listStatus(path);
    for (FileStatus f : fileList) {
        if ((f.isFile() || (f.isDirectory() && purgeDirectory)) && canBePurged(f, retention)) {
            try {
                if (fs.delete(f.getPath(), true)) {
                    counter++;
                } else {
                    getMetrics().incrCounter(this.getClass(), QUERY_RESULT_PURGER_ERROR_COUNTER);
                }
            } catch (IOException e) {
                getMetrics().incrCounter(this.getClass(), QUERY_RESULT_PURGER_ERROR_COUNTER);
            }
        }
    }
    log.info("Purged {} files/directories in {}", counter, path.toString());
}

76. TestQueryService#readResultFileSize()

Project: lens
Source File: TestQueryService.java
View license
/**
   * Returns the size of the result set file when the result path is a file, null otherwise.
   *
   * @param resultset persisted query result whose URI is inspected
   * @param handle query handle expected to appear in the persisted URI
   * @param isDir whether the result path is expected to be a directory
   * @return file size in bytes, or null when the path is a directory
   * @throws IOException if the file status cannot be read
   */
public static Long readResultFileSize(PersistentQueryResult resultset, QueryHandle handle, boolean isDir) throws IOException {
    assertTrue(resultset.getPersistedURI().contains(handle.toString()));
    Path actualPath = new Path(resultset.getPersistedURI());
    FileSystem fs = actualPath.getFileSystem(new Configuration());
    FileStatus fileStatus = fs.getFileStatus(actualPath);
    if (fileStatus.isDir()) {
        assertTrue(isDir);
        return null;
    } else {
        assertFalse(isDir);
        return fileStatus.getLen();
    }
}

77. DictionaryImporter#run()

Project: lumify
Source File: DictionaryImporter.java
View license
@Override
protected int run(CommandLine cmd) throws Exception {
    User user = getUser();
    FileSystem fs = getFileSystem();
    Path dictionaryPath = new Path(directory);
    FileStatus[] files = fs.listStatus(dictionaryPath, new DictionaryPathFilter(this.extension));
    for (FileStatus fileStatus : files) {
        LOGGER.info("Importing dictionary file: " + fileStatus.getPath().toString());
        String conceptName = FilenameUtils.getBaseName(fileStatus.getPath().toString());
        conceptName = URLDecoder.decode(conceptName, "UTF-8");
        Concept concept = getOntologyRepository().getConceptByIRI(conceptName);
        checkNotNull(concept, "Could not find concept with name " + conceptName);
        writeFile(fs.open(fileStatus.getPath()), conceptName, user);
    }
    modelSession.close();
    return 0;
}

78. AvroUtils#addAvroCacheFiles()

Project: ml-ease
Source File: AvroUtils.java
View license
/**
   * Given a path to an output folder, finds the existing "*.avro" files and adds
   * them as cache files to be distributed. Throws an exception if no files are found.
   *
   * @param conf Job configuration
   * @param outPath The path to the HDFS directory that has part files to cache
   * @throws RuntimeException if no part file is found at outPath
   */
public static void addAvroCacheFiles(JobConf conf, Path outPath) throws Exception {
    FileStatus[] partFiles = getAvroPartFiles(conf, outPath);
    if (partFiles.length == 0) {
        throw new RuntimeException("DistributedCacheFileUtils: No (part) file is found to cache at location:" + outPath);
    }
    for (FileStatus partFile : partFiles) {
        // add the file and set fileRead to true, since we have read at least one file
        DistributedCache.addCacheFile(partFile.getPath().toUri(), conf);
    }
}
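
On the newer org.apache.hadoop.mapreduce API, DistributedCache is deprecated in favor of Job.addCacheFile. The sketch below shows the same idea under that assumption; the inline ".avro" PathFilter is a stand-in for getAvroPartFiles, which is not shown here.

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;

public class AvroCacheFiles {

    /** Adds every *.avro file directly under outPath to the job's distributed cache. */
    public static void addAvroCacheFiles(Job job, Path outPath) throws IOException {
        FileSystem fs = outPath.getFileSystem(job.getConfiguration());
        FileStatus[] avroFiles = fs.listStatus(outPath, path -> path.getName().endsWith(".avro"));
        if (avroFiles.length == 0) {
            throw new RuntimeException("No .avro file found to cache at " + outPath);
        }
        for (FileStatus avroFile : avroFiles) {
            // Job.addCacheFile replaces the deprecated DistributedCache.addCacheFile
            job.addCacheFile(avroFile.getPath().toUri());
        }
    }
}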

79. HadoopFileSource#getAllFileNames()

Project: mr4c
Source File: HadoopFileSource.java
View license
private List<String> getAllFileNames(Path dir) throws IOException {
    List<String> names = new ArrayList<String>();
    FileStatus[] files = m_fs.listStatus(dir);
    if (files == null) {
        throw new FileNotFoundException(String.format("[%s] is not an existing directory", dir));
    }
    for (FileStatus status : files) {
        if (status.isDirectory() && !m_flat) {
            names.addAll(getAllFileNames(status.getPath()));
        } else {
            String name = m_dir.toUri().relativize(status.getPath().toUri()).getPath();
            names.add(name);
        }
    }
    return names;
}

80. HadoopFileUtils#getPathSize()

Project: mrgeo
Source File: HadoopFileUtils.java
View license
public static long getPathSize(final Configuration conf, final Path p) throws IOException {
    long result = 0;
    final FileSystem fs = getFileSystem(conf, p);
    final FileStatus status = fs.getFileStatus(p);
    if (status.isDir()) {
        final FileStatus[] list = fs.listStatus(p);
        for (final FileStatus l : list) {
            result += getPathSize(conf, l.getPath());
        }
    } else {
        result = status.getLen();
    }
    return result;
}
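
When only the aggregate size is needed, the recursion can be avoided entirely: FileSystem.getContentSummary sums file lengths over the whole subtree in one call. A minimal sketch:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathSize {

    /** Total bytes stored in all files under p, or the file length if p is a regular file. */
    public static long getPathSize(FileSystem fs, Path p) throws IOException {
        // ContentSummary.getLength() is the sum of file lengths in the subtree,
        // equivalent to the recursive walk above but computed in a single call
        return fs.getContentSummary(p).getLength();
    }
}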

81. XFsTestCase#setAllPermissions()

Project: oozie
Source File: XFsTestCase.java
View license
private void setAllPermissions(FileSystem fileSystem, Path path) throws IOException {
    FsPermission fsPermission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    try {
        fileSystem.setPermission(path, fsPermission);
    } catch (IOException ex) {
    }
    FileStatus fileStatus = fileSystem.getFileStatus(path);
    if (fileStatus.isDir()) {
        for (FileStatus status : fileSystem.listStatus(path)) {
            setAllPermissions(fileSystem, status.getPath());
        }
    }
}

82. LoadFuncHelper#determineFirstFile()

Project: pig
Source File: LoadFuncHelper.java
View license
/**
     * If location is a directory, returns the first file found under it;
     * otherwise returns the location itself.
     *
     * @param location path to resolve
     * @return the first file under location, or location itself if it is a file
     * @throws FrontendException if location is a directory that contains no files
     * @throws IOException if the file status cannot be read
     */
public Path determineFirstFile(String location) throws IOException {
    Path path = new Path(location);
    FileStatus status = fileSystem.getFileStatus(path);
    if (status.isDir()) {
        // get the first file.
        path = getFirstFile(fileSystem, path);
        if (path == null) {
            throw new FrontendException(location + " has no files");
        }
    }
    return path;
}

83. AvroStorageUtils#getLast()

Project: pig
Source File: AvroStorageUtils.java
View license
/** Returns the last file (in name order) under an HDFS path if it is a directory,
     * or the path itself if it is a file.
     */
public static Path getLast(Path path, FileSystem fs) throws IOException {
    FileStatus status = fs.getFileStatus(path);
    if (!status.isDir()) {
        return path;
    }
    FileStatus[] statuses = fs.listStatus(path, PATH_FILTER);
    if (statuses.length == 0) {
        return null;
    } else {
        Arrays.sort(statuses);
        for (int i = statuses.length - 1; i >= 0; i--) {
            if (!statuses[i].isDir()) {
                return statuses[i].getPath();
            }
        }
        return null;
    }
}
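
Arrays.sort(statuses) above relies on the natural ordering of FileStatus, which compares paths. An explicit comparator makes that intent visible. The sketch below is an equivalent Java 8 formulation, assuming the Hadoop 2.x isFile()/isDirectory() methods rather than the deprecated isDir(), and omitting the PATH_FILTER used above.

import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LastFileFinder {

    /** Returns the last file (by name) directly under path, or path itself if it is a file. */
    public static Path getLast(FileSystem fs, Path path) throws IOException {
        if (fs.getFileStatus(path).isFile()) {
            return path;
        }
        return Arrays.stream(fs.listStatus(path))
                .filter(status -> !status.isDirectory())
                .max(Comparator.comparing((FileStatus status) -> status.getPath().getName()))
                .map(FileStatus::getPath)
                .orElse(null);
    }
}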

84. SegmentCreationJob#addDepsJarToDistributedCache()

Project: pinot
Source File: SegmentCreationJob.java
View license
private void addDepsJarToDistributedCache(Path path, Job job) throws IOException {
    LOGGER.info("Trying to add all the deps jar files from directory: {}", path);
    FileSystem fs = FileSystem.get(getConf());
    FileStatus[] fileStatusArr = fs.listStatus(path);
    for (FileStatus fileStatus : fileStatusArr) {
        if (fileStatus.isDirectory()) {
            addDepsJarToDistributedCache(fileStatus.getPath(), job);
        } else {
            Path depJarPath = fileStatus.getPath();
            if (depJarPath.getName().endsWith(".jar")) {
                LOGGER.info("Adding deps jar files: {}", path);
                job.addCacheArchive(path.toUri());
            }
        }
    }
}

85. SegmentCreationJob#getDataFilesFromPath()

Project: pinot
Source File: SegmentCreationJob.java
View license
private ArrayList<FileStatus> getDataFilesFromPath(FileSystem fs, Path inBaseDir) throws IOException {
    ArrayList<FileStatus> dataFileStatusList = new ArrayList<FileStatus>();
    FileStatus[] fileStatusArr = fs.listStatus(inBaseDir);
    for (FileStatus fileStatus : fileStatusArr) {
        if (fileStatus.isDirectory()) {
            LOGGER.info("Trying to add all the data files from directory: {}", fileStatus.getPath());
            dataFileStatusList.addAll(getDataFilesFromPath(fs, fileStatus.getPath()));
        } else {
            String fileName = fileStatus.getPath().getName();
            if (fileName.endsWith(".avro")) {
                LOGGER.info("Adding avro files: {}", fileStatus.getPath());
                dataFileStatusList.add(fileStatus);
            }
            if (fileName.endsWith(".csv")) {
                LOGGER.info("Adding csv files: {}", fileStatus.getPath());
                dataFileStatusList.add(fileStatus);
            }
            if (fileName.endsWith(".json")) {
                LOGGER.info("Adding json files: {}", fileStatus.getPath());
                dataFileStatusList.add(fileStatus);
            }
        }
    }
    return dataFileStatusList;
}

86. SegmentTarPushJob#run()

Project: pinot
Source File: SegmentTarPushJob.java
View license
public void run() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(_segmentPath);
    FileStatus[] fileStatusArr = fs.globStatus(path);
    for (FileStatus fileStatus : fileStatusArr) {
        if (fileStatus.isDirectory()) {
            pushDir(fs, fileStatus.getPath());
        } else {
            pushOneTarFile(fs, fileStatus.getPath());
        }
    }
}

87. SegmentUriPushJob#run()

Project: pinot
Source File: SegmentUriPushJob.java
View license
public void run() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(_segmentPath);
    FileStatus[] fileStatusArr = fs.globStatus(path);
    for (FileStatus fileStatus : fileStatusArr) {
        if (fileStatus.isDirectory()) {
            pushDir(fs, fileStatus.getPath());
        } else {
            pushOneTarFile(fs, fileStatus.getPath());
        }
    }
}
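
Both push jobs iterate the globStatus result directly. Depending on the FileSystem implementation and whether the pattern actually contains glob characters, globStatus may return null rather than an empty array when nothing matches, so a defensive variant can guard for that. A hedged sketch:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SegmentGlob {

    /** Lists segment paths matching the glob, treating a null result as "no matches". */
    public static FileStatus[] globSegments(String segmentPathPattern) throws IOException {
        Configuration conf = new Configuration();
        Path pattern = new Path(segmentPathPattern);
        FileSystem fs = pattern.getFileSystem(conf);
        FileStatus[] matches = fs.globStatus(pattern);
        // globStatus may return null instead of an empty array when nothing matches
        return matches == null ? new FileStatus[0] : matches;
    }
}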

88. HiveMetadata#recursiveDeleteFilesStartingWith()

Project: presto
Source File: HiveMetadata.java
View license
/**
     * Attempt to remove all files in all directories within {@code directory} that start with the {@code filePrefix}.
     * @return the files starting with the {@code filePrefix} that could not be removed
     */
private static List<String> recursiveDeleteFilesStartingWith(FileSystem fileSystem, Path directory, String filePrefix) {
    FileStatus[] allFiles;
    try {
        allFiles = fileSystem.listStatus(directory);
    } catch (IOException e) {
        return ImmutableList.of(directory + "/" + filePrefix + "*");
    }
    ImmutableList.Builder<String> notDeletedFiles = ImmutableList.builder();
    for (FileStatus fileStatus : allFiles) {
        Path path = fileStatus.getPath();
        if (HadoopFileStatus.isFile(fileStatus) && path.getName().startsWith(filePrefix)) {
            if (!deleteIfExists(fileSystem, path)) {
                notDeletedFiles.add(path.toString());
            }
        } else if (HadoopFileStatus.isDirectory(fileStatus)) {
            notDeletedFiles.addAll(recursiveDeleteFilesStartingWith(fileSystem, path, filePrefix));
        }
    }
    return notDeletedFiles.build();
}

89. FileUtil#getModificationTimeMsRecursive()

Project: secor
Source File: FileUtil.java
View license
public static long getModificationTimeMsRecursive(String path) throws IOException {
    FileSystem fs = getFileSystem(path);
    Path fsPath = new Path(path);
    FileStatus status = fs.getFileStatus(fsPath);
    long modificationTime = status.getModificationTime();
    FileStatus[] statuses = fs.listStatus(fsPath);
    if (statuses != null) {
        for (FileStatus fileStatus : statuses) {
            Path statusPath = fileStatus.getPath();
            String stringPath;
            if (path.startsWith("s3://") || path.startsWith("s3n://") || path.startsWith("s3a://") || path.startsWith("swift://") || path.startsWith("gs://")) {
                stringPath = statusPath.toUri().toString();
            } else {
                stringPath = statusPath.toUri().getPath();
            }
            if (!stringPath.equals(path)) {
                modificationTime = Math.max(modificationTime, getModificationTimeMsRecursive(stringPath));
            }
        }
    }
    return modificationTime;
}

90. TestSplittableBufferedWriter#ensureEmptyWriteDir()

View license
/** Create the directory we'll write our test files to, and
   * make sure it has no files in it.
   */
private void ensureEmptyWriteDir() throws IOException {
    FileSystem fs = FileSystem.getLocal(getConf());
    Path writeDir = getWritePath();
    fs.mkdirs(writeDir);
    FileStatus[] stats = fs.listStatus(writeDir);
    for (FileStatus stat : stats) {
        if (stat.isDir()) {
            fail("setUp(): Write directory " + writeDir + " contains subdirectories");
        }
        LOG.debug("setUp(): Removing " + stat.getPath());
        if (!fs.delete(stat.getPath(), false)) {
            fail("setUp(): Could not delete residual file " + stat.getPath());
        }
    }
    if (!fs.exists(writeDir)) {
        fail("setUp: Could not create " + writeDir);
    }
}

91. TestAppendUtils#listFiles()

Project: sqoop
Source File: TestAppendUtils.java
View license
/** @return FileStatus for data files only. */
private FileStatus[] listFiles(FileSystem fs, Path path) throws IOException {
    FileStatus[] fileStatuses = fs.listStatus(path);
    ArrayList<FileStatus> files = new ArrayList<FileStatus>();
    Pattern patt = Pattern.compile("part.*-([0-9][0-9][0-9][0-9][0-9]).*");
    for (FileStatus fstat : fileStatuses) {
        String fname = fstat.getPath().getName();
        if (!fstat.isDir()) {
            Matcher mat = patt.matcher(fname);
            if (mat.matches()) {
                files.add(fstat);
            }
        }
    }
    return files.toArray(new FileStatus[files.size()]);
}

92. TestMerge#recordStartsWith()

Project: sqoop
Source File: TestMerge.java
View license
/**
   * Return true if there's a file in 'dirName' with a line that starts with
   * the values in 'record'.
   */
protected boolean recordStartsWith(List<Integer> record, String dirName, SqoopOptions.FileLayout fileLayout) throws Exception {
    Path warehousePath = new Path(LOCAL_WAREHOUSE_DIR);
    Path targetPath = new Path(warehousePath, dirName);
    FileSystem fs = FileSystem.getLocal(new Configuration());
    FileStatus[] files = fs.listStatus(targetPath);
    if (null == files || files.length == 0) {
        fail("Got no import files!");
    }
    for (FileStatus stat : files) {
        Path p = stat.getPath();
        if (p.getName().startsWith("part-")) {
            if (checkFileForLine(fs, p, fileLayout, record)) {
                // We found the line. Nothing further to do.
                return true;
            }
        }
    }
    return false;
}

93. TestMultiMaps#getDataFilePaths()

Project: sqoop
Source File: TestMultiMaps.java
View license
/** @return a list of Path objects for each data file */
protected List<Path> getDataFilePaths() throws IOException {
    List<Path> paths = new ArrayList<Path>();
    Configuration conf = new Configuration();
    if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
        conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
    }
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] stats = fs.listStatus(getTablePath(), new Utils.OutputFileUtils.OutputFilesFilter());
    for (FileStatus stat : stats) {
        paths.add(stat.getPath());
    }
    return paths;
}

94. SQLServerMultiMapsManualTest#getDataFilePaths()

View license
// this test just uses the two int table.
/** @return a list of Path objects for each data file */
protected List<Path> getDataFilePaths() throws IOException {
    List<Path> paths = new ArrayList<Path>();
    Configuration conf = new Configuration();
    conf.set("fs.default.name", "file:///");
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] stats = fs.listStatus(getTablePath(), new Utils.OutputFileUtils.OutputFilesFilter());
    for (FileStatus stat : stats) {
        paths.add(stat.getPath());
    }
    return paths;
}

95. HDFSTransactionStateStorage#deleteLogsOlderThan()

View license
@Override
public void deleteLogsOlderThan(long timestamp) throws IOException {
    FileStatus[] statuses = fs.listStatus(snapshotDir, new LogFileFilter(0, timestamp));
    int removedCnt = 0;
    for (FileStatus status : statuses) {
        LOG.debug("Removing old transaction log {}", status.getPath());
        if (fs.delete(status.getPath(), false)) {
            removedCnt++;
        } else {
            LOG.error("Failed to delete transaction log file {}", status.getPath());
        }
    }
    LOG.debug("Removed {} transaction logs older than {}", removedCnt, timestamp);
}

96. MapReduceIndexerTool#listSortedOutputShardDirs()

View license
private FileStatus[] listSortedOutputShardDirs(Path outputReduceDir, FileSystem fs) throws FileNotFoundException, IOException {
    final String dirPrefix = SolrOutputFormat.getOutputName(job);
    FileStatus[] dirs = fs.listStatus(outputReduceDir, new PathFilter() {

        @Override
        public boolean accept(Path path) {
            return path.getName().startsWith(dirPrefix);
        }
    });
    for (FileStatus dir : dirs) {
        if (!dir.isDirectory()) {
            throw new IllegalStateException("Not a directory: " + dir.getPath());
        }
    }
    // use alphanumeric sort (rather than lexicographical sort) to properly handle more than 99999 shards
    Arrays.sort(dirs, (f1, f2) -> new AlphaNumericComparator().compare(f1.getPath().getName(), f2.getPath().getName()));
    return dirs;
}

97. HdfsDirectory#listAll()

Project: lucene-solr
Source File: HdfsDirectory.java
View license
@Override
public String[] listAll() throws IOException {
    FileStatus[] listStatus = getFileSystem().listStatus(hdfsDirPath);
    List<String> files = new ArrayList<>();
    if (listStatus == null) {
        return new String[] {};
    }
    for (FileStatus status : listStatus) {
        files.add(status.getPath().getName());
    }
    return getNormalNames(files);
}

98. HdfsLocalityReporter#refreshDirectory()

View license
/**
   * Update the cached block locations for the given directory. This removes cache entries for
   * files that no longer exist in the file system and adds entries for any new files that have
   * shown up.
   *
   * @param dir
   *          The directory to refresh
   * @throws IOException
   *           If there is a problem getting info from HDFS
   */
private void refreshDirectory(HdfsDirectory dir) throws IOException {
    Map<FileStatus, BlockLocation[]> directoryCache = cache.get(dir);
    Set<FileStatus> cachedStatuses = directoryCache.keySet();
    FileSystem fs = dir.getFileSystem();
    FileStatus[] statuses = fs.listStatus(dir.getHdfsDirPath());
    List<FileStatus> statusList = Arrays.asList(statuses);
    logger.debug("Updating locality information for: {}", statusList);
    // Keep only the files that still exist
    cachedStatuses.retainAll(statusList);
    // Fill in missing entries in the cache
    for (FileStatus status : statusList) {
        if (!status.isDirectory() && !directoryCache.containsKey(status)) {
            BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
            directoryCache.put(status, locations);
        }
    }
}
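
The cached BlockLocation arrays are what a locality report is ultimately derived from. The sketch below is a hedged illustration of aggregating bytes per host from such a cache; it is not the reporter's actual metric computation.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.fs.BlockLocation;

public class LocalityMath {

    /** Sums, for each host, the number of bytes whose blocks have a replica on that host. */
    public static Map<String, Long> bytesByHost(Map<?, BlockLocation[]> directoryCache) throws IOException {
        Map<String, Long> bytes = new HashMap<>();
        for (BlockLocation[] locations : directoryCache.values()) {
            for (BlockLocation block : locations) {
                // getHosts() lists the datanodes holding a replica of this block
                for (String host : block.getHosts()) {
                    bytes.merge(host, block.getLength(), Long::sum);
                }
            }
        }
        return bytes;
    }
}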

99. HdfsUpdateLog#getLogList()

Project: lucene-solr
Source File: HdfsUpdateLog.java
View license
public String[] getLogList(Path tlogDir) throws FileNotFoundException, IOException {
    final String prefix = TLOG_NAME + '.';
    FileStatus[] files = fs.listStatus(tlogDir, new PathFilter() {

        @Override
        public boolean accept(Path name) {
            return name.getName().startsWith(prefix);
        }
    });
    List<String> fileList = new ArrayList<>(files.length);
    for (FileStatus file : files) {
        fileList.add(file.getPath().getName());
    }
    return fileList.toArray(new String[0]);
}

100. HdfsBlobStoreImpl#listBlobStoreFiles()

View license
protected Iterator<BlobStoreFile> listBlobStoreFiles(Path path) throws IOException {
    ArrayList<BlobStoreFile> ret = new ArrayList<BlobStoreFile>();
    FileStatus[] files = _fs.listStatus(new Path[] { path });
    if (files != null) {
        for (FileStatus sub : files) {
            try {
                ret.add(new HdfsBlobStoreFile(sub.getPath().getParent(), sub.getPath().getName(), _hadoopConf));
            } catch (IllegalArgumentException e) {
                LOG.warn("Found an unexpected file in {} {}", path, sub.getPath().getName());
            }
        }
    }
    return ret.iterator();
}