org.apache.hadoop.fs.RemoteIterator

Here are examples of the Java API org.apache.hadoop.fs.RemoteIterator taken from open source projects. Each example is shown with its vote count; higher-voted examples were judged more useful and appropriate.

109 Examples
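
A quick orientation before the examples: RemoteIterator mirrors java.util.Iterator, but both hasNext() and next() declare IOException, so it cannot be used directly in for-each loops or with the usual iterator helpers. The minimal consumption pattern, common to most of the examples below, looks like this (a hedged sketch; the path /tmp/data and the default Configuration are placeholders, not taken from any project on this page):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class RemoteIteratorSketch {

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // listFiles returns a RemoteIterator that may fetch entries lazily from the NameNode
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path("/tmp/data"), true);
        while (it.hasNext()) {
            // both hasNext() and next() can throw IOException
            LocatedFileStatus status = it.next();
            System.out.println(status.getPath());
        }
    }
}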

18 Source : RemoteIteratorAdaptor.java
with Apache License 2.0
from spotify

/**
 * Simple adaptor for Hadoop's RemoteIterator to the Iterator interface to be able to
 * use all the nice libraries/functions around iterators.
 *
 * @param <T> Type of the objects this iterator yields
 */
public class RemoteIteratorAdaptor<T> implements RemoteIterator<T>, Iterator<T> {

    static final Logger logger = LoggerFactory.getLogger(RemoteIteratorAdaptor.class);

    private final RemoteIterator<T> wrappedRemoteIter;

    public RemoteIteratorAdaptor(RemoteIterator<T> remoteIterator) {
        this.wrappedRemoteIter = remoteIterator;
    }

    @Override
    public boolean hasNext() {
        try {
            return this.wrappedRemoteIter.hasNext();
        } catch (IOException e) {
            logger.error("Remote iterator failed checking for next element in remote iterator", e);
            throw new WrappedRemoteIteratorException(e);
        }
    }

    @Override
    public T next() {
        try {
            return this.wrappedRemoteIter.next();
        } catch (IOException e) {
            logger.error("Failed retrieving next element from remote iterator", e);
            throw new WrappedRemoteIteratorException(e);
        }
    }

    public static class WrappedRemoteIteratorException extends RuntimeException {

        public WrappedRemoteIteratorException(Throwable cause) {
            super(cause);
        }
    }
}
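
A possible usage of the adaptor (a hypothetical sketch, not part of the Spotify source; fs and the listed path are assumed to exist in scope):

// Wrap the Hadoop iterator so it can be consumed through java.util.Iterator;
// IOExceptions are rethrown as WrappedRemoteIteratorException.
RemoteIterator<LocatedFileStatus> remote = fs.listFiles(new Path("/data"), true);
Iterator<LocatedFileStatus> adapted = new RemoteIteratorAdaptor<>(remote);
while (adapted.hasNext()) {
    System.out.println(adapted.next().getPath());
}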

18 Source : TestNodeManagerReboot.java
with Apache License 2.0
from NJUJYB

private int numOfUsercacheDELDirs(String localDir) throws IOException {
    int count = 0;
    RemoteIterator<FileStatus> fileStatus = localFS.listStatus(new Path(localDir));
    while (fileStatus.hasNext()) {
        FileStatus status = fileStatus.next();
        if (status.getPath().getName().matches(".*" + ContainerLocalizer.USERCACHE + "_DEL_.*")) {
            count++;
        }
    }
    return count;
}

18 Source : TestDistributedFileSystem.java
with Apache License 2.0
from NJUJYB

@Test(timeout = 60000)
public void testListFiles() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        DistributedFileSystem fs = cluster.getFileSystem();
        final Path relative = new Path("relative");
        fs.create(new Path(relative, "foo")).close();
        final List<LocatedFileStatus> retVal = new ArrayList<LocatedFileStatus>();
        final RemoteIterator<LocatedFileStatus> iter = fs.listFiles(relative, true);
        while (iter.hasNext()) {
            retVal.add(iter.next());
        }
        System.out.println("retVal = " + retVal);
    } finally {
        cluster.shutdown();
    }
}

18 Source : DirectoryStreamWrapper.java
with Apache License 2.0
from dremio

/**
 * Wrapper around Hadoop {@code org.apache.hadoop.fs.RemoteIterator}
 * @param <T>
 */
class DirectoryStreamWrapper implements DirectoryStream<FileAttributes> {

    private final RemoteIterator<FileStatus> remoteIterator;

    private final AtomicBoolean init = new AtomicBoolean(false);

    private final AtomicBoolean closed = new AtomicBoolean(false);

    public DirectoryStreamWrapper(RemoteIterator<FileStatus> remoteIterator) {
        this.remoteIterator = remoteIterator;
    }

    @Override
    public void close() throws IOException {
        if (!closed.compareAndSet(false, true)) {
            return;
        }
        if (remoteIterator instanceof Closeable) {
            ((Closeable) remoteIterator).close();
        }
    }

    @Override
    public Iterator<FileAttributes> iterator() {
        if (!init.compareAndSet(false, true)) {
            throw new IllegalStateException("Iterator already accessed.");
        }
        if (!closed.compareAndSet(false, true)) {
            throw new IllegalStateException("Directory stream already closed.");
        }
        return new Iterator<FileAttributes>() {

            private FileAttributes current = null;

            @Override
            public boolean hasNext() {
                if (current != null) {
                    return true;
                }
                try {
                    if (!remoteIterator.hasNext()) {
                        return false;
                    }
                    // DirectoryStream guarantees that next() will not throw a DirectoryIteratorException
                    // if hasNext() was called first
                    current = new FileStatusWrapper(remoteIterator.next());
                    return true;
                } catch (IOException e) {
                    throw new DirectoryIteratorException(e);
                }
            }

            @Override
            public FileAttributes next() {
                if (!hasNext()) {
                    throw new NoSuchElementException();
                }
                FileAttributes result = current;
                current = null;
                return result;
            }
        };
    }
}
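
A possible way to consume the wrapper (a hypothetical sketch, not part of the Dremio source; remoteIterator is assumed to come from a Hadoop FileSystem listing, and since the class is package-private this only illustrates the DirectoryStream contract):

// DirectoryStream extends Iterable and Closeable, so try-with-resources and for-each apply;
// note that iterator() may be called at most once per instance.
try (DirectoryStream<FileAttributes> stream = new DirectoryStreamWrapper(remoteIterator)) {
    for (FileAttributes attributes : stream) {
        System.out.println(attributes);
    }
}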

17 Source : FsIterator.java
with GNU General Public License v3.0
from sdadas

/**
 * @author Sławomir Dadas
 */
public class FsIterator implements Iterator<FileStatus> {

    private final FsShell shell;

    private final FileSystem fs;

    private final RemoteIterator<LocatedFileStatus> inner;

    public FsIterator(FsShell shell, FileSystem fs, RemoteIterator<LocatedFileStatus> inner) {
        this.shell = shell;
        this.fs = fs;
        this.inner = inner;
    }

    @Override
    public boolean hasNext() {
        try {
            return inner.hasNext();
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }

    @Override
    public FileStatus next() {
        try {
            return inner.next();
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }
}

17 Source : UncacheFileAction.java
with Apache License 2.0
from Intel-bigdata

@VisibleForTesting
Long getCacheId(String fileName) throws Exception {
    CacheDirectiveInfo.Builder filterBuilder = new CacheDirectiveInfo.Builder();
    filterBuilder.setPath(new Path(fileName));
    CacheDirectiveInfo filter = filterBuilder.build();
    RemoteIterator<CacheDirectiveEntry> directiveEntries = dfsClient.listCacheDirectives(filter);
    if (!directiveEntries.hasNext()) {
        return null;
    }
    return directiveEntries.next().getInfo().getId();
}

17 Source : CacheFileAction.java
with Apache License 2.0
from Intel-bigdata

public boolean isCached(String fileName) throws Exception {
    CacheDirectiveInfo.Builder filterBuilder = new CacheDirectiveInfo.Builder();
    filterBuilder.setPath(new Path(fileName));
    CacheDirectiveInfo filter = filterBuilder.build();
    RemoteIterator<CacheDirectiveEntry> directiveEntries = dfsClient.listCacheDirectives(filter);
    return directiveEntries.hasNext();
}

16 Source : Rt2HisOnHive.java
with Apache License 2.0
from shunfei

private static boolean hasChild(FileSystem fileSystem, Path dir) throws IOException {
    if (!fileSystem.exists(dir)) {
        return false;
    }
    RemoteIterator<LocatedFileStatus> iterator = fileSystem.listFiles(dir, true);
    return iterator.hasNext();
}

16 Source : SafeFileOutputCommitter.java
with Apache License 2.0
from NationalSecurityAgency

protected boolean containsFiles(final FileSystem fs, final Path path, final List<Path> list) throws FileNotFoundException, IOException {
    RemoteIterator<Path> listing = listFiles(fs, path);
    while (listing.hasNext()) {
        list.add(listing.next());
    }
    return (!list.isEmpty());
}

16 Source : FSUtils.java
with Apache License 2.0
from apache

/**
 * Recursively processes all files in the base-path. If excludeMetaFolder is set, the meta-folder and all its subdirs
 * are skipped
 *
 * @param fs File System
 * @param basePathStr Base-Path
 * @param consumer Callback for processing
 * @param excludeMetaFolder Exclude .hoodie folder
 * @throws IOException -
 */
public static void processFiles(FileSystem fs, String basePathStr, Function<FileStatus, Boolean> consumer, boolean excludeMetaFolder) throws IOException {
    PathFilter pathFilter = excludeMetaFolder ? getExcludeMetaPathFilter() : ALLOW_ALL_FILTER;
    FileStatus[] topLevelStatuses = fs.listStatus(new Path(basePathStr));
    for (FileStatus child : topLevelStatuses) {
        if (child.isFile()) {
            boolean success = consumer.apply(child);
            if (!success) {
                throw new HoodieException("Failed to process file-status=" + child);
            }
        } else if (pathFilter.accept(child.getPath())) {
            RemoteIterator<LocatedFileStatus> itr = fs.listFiles(child.getPath(), true);
            while (itr.hasNext()) {
                FileStatus status = itr.next();
                boolean success = consumer.apply(status);
                if (!success) {
                    throw new HoodieException("Failed to process file-status=" + status);
                }
            }
        }
    }
}

15 Source : TestViewFsAtHdfsRoot.java
with Apache License 2.0
from NJUJYB

/**
 * Override this so that we don't set the targetTestRoot to any path under the
 * root of the FS, and so that we don't try to delete the test dir, but rather
 * only its contents.
 */
@Override
void initializeTargetTestRoot() throws IOException {
    targetTestRoot = fc.makeQualified(new Path("/"));
    RemoteIterator<FileStatus> dirContents = fc.listStatus(targetTestRoot);
    while (dirContents.hasNext()) {
        fc.delete(dirContents.next().getPath(), true);
    }
}

15 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

private FileStatus[] listStatusInternal(boolean located, Path dataPath) throws IOException {
    FileStatus[] dirPaths = new FileStatus[0];
    if (located) {
        RemoteIterator<LocatedFileStatus> statIter = fsView.listLocatedStatus(dataPath);
        ArrayList<LocatedFileStatus> tmp = new ArrayList<LocatedFileStatus>(10);
        while (statIter.hasNext()) {
            tmp.add(statIter.next());
        }
        dirPaths = tmp.toArray(dirPaths);
    } else {
        dirPaths = fsView.listStatus(dataPath);
    }
    return dirPaths;
}

15 Source : ViewFs.java
with Apache License 2.0
from NJUJYB

@Override
public RemoteIterator<FileStatus> listStatusIterator(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    final InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true);
    final RemoteIterator<FileStatus> fsIter = res.targetFileSystem.listStatusIterator(res.remainingPath);
    if (res.isInternalDir()) {
        return fsIter;
    }
    return new RemoteIterator<FileStatus>() {

        final RemoteIterator<FileStatus> myIter;

        final ChRootedFs targetFs;

        {
            // Init
            myIter = fsIter;
            targetFs = (ChRootedFs) res.targetFileSystem;
        }

        @Override
        public boolean hasNext() throws IOException {
            return myIter.hasNext();
        }

        @Override
        public FileStatus next() throws IOException {
            FileStatus status = myIter.next();
            String suffix = targetFs.stripOutRoot(status.getPath());
            return new ViewFsFileStatus(status, makeQualified(suffix.length() == 0 ? f : new Path(res.resolvedPath, suffix)));
        }
    };
}

15 Source : SmartFileSystem.java
with Apache License 2.0
from Intel-bigdata

@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path) throws IOException {
    RemoteIterator<Path> corruptFileBlocksIterator = super.listCorruptFileBlocks(path);
    FileState fileState = smartDFSClient.getFileState(getPathName(path));
    if (fileState instanceof CompactFileState) {
        corruptFileBlocksIterator = super.listCorruptFileBlocks(new Path(((CompactFileState) fileState).getFileContainerInfo().getContainerFilePath()));
    }
    return corruptFileBlocksIterator;
}

15 Source : TestPseudoDistributedFileSystem.java
with Apache License 2.0
from dremio

@Test
public void testListStatusIteratorPastLastElement() throws IOException {
    final Path root = new Path("/");
    final RemoteIterator<FileStatus> statusIter = fs.listStatusIterator(root);
    while (statusIter.hasNext()) {
        statusIter.next();
    }
    try {
        statusIter.next();
        fail("NoSuchElementException should be throw when next() is called when there are no elements remaining.");
    } catch (NoSuchElementException ex) {
    // OK.
    }
}

15 Source : PseudoDistributedFileSystem.java
with Apache License 2.0
from dremio

@Override
public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException {
    final RemoteIterator<FileStatus> remoteIterator = listStatusIterator(f);
    // Note: RemoteIterator has no relation to java.util.Iterator so frequently used
    // helper functions can't be called on them.
    final List<FileStatus> statuses = Lists.newArrayList();
    while (remoteIterator.hasNext()) {
        statuses.add(remoteIterator.next());
    }
    return statuses.toArray(new FileStatus[0]);
}

15 Source : HDFSEasy.java
with Apache License 2.0
from apache

public int countFiles(Path path) throws IOException {
    RemoteIterator<LocatedFileStatus> i = dfs.listFiles(path, false);
    int files = 0;
    while (i.hasNext()) {
        files++;
        i.next();
    }
    return files;
}

14 Source : HdfsRm.java
with Apache License 2.0
from whirlys

/**
 * List all files under a directory
 */
public static void listFiles(String remotePathStr, boolean recursive) throws IOException {
    Path remotePath = new Path(remotePathStr);
    FileSystem fileSystem = SysUtil.getFileSystem();
    RemoteIterator<LocatedFileStatus> iterator = fileSystem.listFiles(remotePath, recursive);
    System.out.println(String.format("文件夹《%s》下的所有文件:", remotePathStr));
    while (iterator.hasNext()) {
        System.out.println(iterator.next());
    }
    fileSystem.close();
}

14 Source : TestMRJobs.java
with Apache License 2.0
from NJUJYB

@Test(timeout = 60000)
public void testRandomWriter() throws IOException, InterruptedException, ClassNotFoundException {
    LOG.info("\n\n\nStarting testRandomWriter().");
    if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
        return;
    }
    RandomTextWriterJob randomWriterJob = new RandomTextWriterJob();
    mrCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES, "3072");
    mrCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP, "1024");
    Job job = randomWriterJob.createJob(mrCluster.getConfig());
    Path outputDir = new Path(OUTPUT_ROOT_DIR, "random-output");
    FileOutputFormat.setOutputPath(job, outputDir);
    job.setSpeculativeExecution(false);
    // The AppMaster jar itself.
    job.addFileToClassPath(APP_JAR);
    job.setJarByClass(RandomTextWriterJob.class);
    // speed up failures
    job.setMaxMapAttempts(1);
    job.submit();
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
    boolean succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID " + jobId, trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
    // Make sure there are three files in the output-dir
    RemoteIterator<FileStatus> iterator = FileContext.getFileContext(mrCluster.getConfig()).listStatus(outputDir);
    int count = 0;
    while (iterator.hasNext()) {
        FileStatus file = iterator.next();
        if (!file.getPath().getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) {
            count++;
        }
    }
    replacedert.replacedertEquals("Number of part files is wrong!", 3, count);
    verifyRandomWriterCounters(job);
// TODO later:  add explicit "isUber()" checks of some sort
}

14 Source : HistoryFileManager.java
with Apache License 2.0
from NJUJYB

private static List<FileStatus> scanDirectory(Path path, FileContext fc, PathFilter pathFilter) throws IOException {
    path = fc.makeQualified(path);
    List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
    while (fileStatusIter.hasNext()) {
        FileStatus fileStatus = fileStatusIter.next();
        Path filePath = fileStatus.getPath();
        if (fileStatus.isFile() && pathFilter.accept(filePath)) {
            jhStatusList.add(fileStatus);
        }
    }
    return jhStatusList;
}

14 Source : TestRetryCacheWithHA.java
with Apache License 2.0
from NJUJYB

@SuppressWarnings("unchecked")
private void listCacheDirectives(HashSet<String> poolNames, int active) throws Exception {
    HashSet<String> tmpNames = (HashSet<String>) poolNames.clone();
    RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null);
    int poolCount = poolNames.size();
    for (int i = 0; i < poolCount; i++) {
        CacheDirectiveEntry directive = directives.next();
        String pollName = directive.getInfo().getPool();
        replacedertTrue("The pool name should be expected", tmpNames.remove(pollName));
        if (i % 2 == 0) {
            int standby = active;
            active = (standby == 0) ? 1 : 0;
            cluster.transitionToStandby(standby);
            cluster.transitionToActive(active);
            cluster.waitActive(active);
        }
    }
    replacedertTrue("All pools must be found", tmpNames.isEmpty());
}

14 Source : ViewFsBaseTest.java
with Apache License 2.0
from NJUJYB

/**
 * Test modify operations (create, mkdir, delete, etc)
 * on the mount file system where the pathname references through
 * the mount points.  Hence these operation will modify the target
 * file system.
 *
 * Verify the operation via mountfs (ie fc) and *also* via the
 *  target file system (ie fclocal) that the mount link points-to.
 */
@Test
public void testOperationsThroughMountLinks() throws IOException {
    // Create file
    fileContextTestHelper.createFileNonRecursive(fcView, "/user/foo");
    replacedert.replacedertTrue("Create file should be file", isFile(fcView, new Path("/user/foo")));
    replacedert.replacedertTrue("Target of created file should be type file", isFile(fcTarget, new Path(targetTestRoot, "user/foo")));
    // Delete the created file
    replacedert.replacedertTrue("Delete should succeed", fcView.delete(new Path("/user/foo"), false));
    replacedert.replacedertFalse("File should not exist after delete", exists(fcView, new Path("/user/foo")));
    replacedert.replacedertFalse("Target File should not exist after delete", exists(fcTarget, new Path(targetTestRoot, "user/foo")));
    // Create file with a 2 component dirs
    fileContextTestHelper.createFileNonRecursive(fcView, "/internalDir/linkToDir2/foo");
    replacedert.replacedertTrue("Created file should be type file", isFile(fcView, new Path("/internalDir/linkToDir2/foo")));
    replacedert.replacedertTrue("Target of created file should be type file", isFile(fcTarget, new Path(targetTestRoot, "dir2/foo")));
    // Delete the created file
    replacedert.replacedertTrue("Delete should suceed", fcView.delete(new Path("/internalDir/linkToDir2/foo"), false));
    replacedert.replacedertFalse("File should not exist after deletion", exists(fcView, new Path("/internalDir/linkToDir2/foo")));
    replacedert.replacedertFalse("Target should not exist after deletion", exists(fcTarget, new Path(targetTestRoot, "dir2/foo")));
    // Create file with a 3 component dirs
    fileContextTestHelper.createFileNonRecursive(fcView, "/internalDir/internalDir2/linkToDir3/foo");
    replacedert.replacedertTrue("Created file should be of type file", isFile(fcView, new Path("/internalDir/internalDir2/linkToDir3/foo")));
    replacedert.replacedertTrue("Target of created file should also be type file", isFile(fcTarget, new Path(targetTestRoot, "dir3/foo")));
    // Recursive Create file with missing dirs
    fileContextTestHelper.createFile(fcView, "/internalDir/linkToDir2/missingDir/miss2/foo");
    replacedert.replacedertTrue("Created file should be of type file", isFile(fcView, new Path("/internalDir/linkToDir2/missingDir/miss2/foo")));
    replacedert.replacedertTrue("Target of created file should also be type file", isFile(fcTarget, new Path(targetTestRoot, "dir2/missingDir/miss2/foo")));
    // Delete the created file
    replacedert.replacedertTrue("Delete should succeed", fcView.delete(new Path("/internalDir/internalDir2/linkToDir3/foo"), false));
    replacedert.replacedertFalse("Deleted File should not exist", exists(fcView, new Path("/internalDir/internalDir2/linkToDir3/foo")));
    replacedert.replacedertFalse("Target of deleted file should not exist", exists(fcTarget, new Path(targetTestRoot, "dir3/foo")));
    // mkdir
    fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/user/dirX"), FileContext.DEFAULT_PERM, false);
    replacedert.replacedertTrue("New dir should be type dir", isDir(fcView, new Path("/user/dirX")));
    replacedert.replacedertTrue("Target of new dir should be of type dir", isDir(fcTarget, new Path(targetTestRoot, "user/dirX")));
    fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/user/dirX/dirY"), FileContext.DEFAULT_PERM, false);
    replacedert.replacedertTrue("New dir should be type dir", isDir(fcView, new Path("/user/dirX/dirY")));
    replacedert.replacedertTrue("Target of new dir should be of type dir", isDir(fcTarget, new Path(targetTestRoot, "user/dirX/dirY")));
    // Delete the created dir
    replacedert.replacedertTrue("Delete should succeed", fcView.delete(new Path("/user/dirX/dirY"), false));
    replacedert.replacedertFalse("Deleted File should not exist", exists(fcView, new Path("/user/dirX/dirY")));
    replacedert.replacedertFalse("Deleted Target should not exist", exists(fcTarget, new Path(targetTestRoot, "user/dirX/dirY")));
    replacedert.replacedertTrue("Delete should succeed", fcView.delete(new Path("/user/dirX"), false));
    replacedert.replacedertFalse("Deleted File should not exist", exists(fcView, new Path("/user/dirX")));
    replacedert.replacedertFalse("Deleted Target should not exist", exists(fcTarget, new Path(targetTestRoot, "user/dirX")));
    // Rename a file
    fileContextTestHelper.createFile(fcView, "/user/foo");
    fcView.rename(new Path("/user/foo"), new Path("/user/fooBar"));
    replacedert.replacedertFalse("Renamed src should not exist", exists(fcView, new Path("/user/foo")));
    replacedert.replacedertFalse(exists(fcTarget, new Path(targetTestRoot, "user/foo")));
    replacedert.replacedertTrue(isFile(fcView, fileContextTestHelper.getTestRootPath(fcView, "/user/fooBar")));
    replacedert.replacedertTrue(isFile(fcTarget, new Path(targetTestRoot, "user/fooBar")));
    fcView.mkdir(new Path("/user/dirFoo"), FileContext.DEFAULT_PERM, false);
    fcView.rename(new Path("/user/dirFoo"), new Path("/user/dirFooBar"));
    replacedert.replacedertFalse("Renamed src should not exist", exists(fcView, new Path("/user/dirFoo")));
    replacedert.replacedertFalse("Renamed src should not exist in target", exists(fcTarget, new Path(targetTestRoot, "user/dirFoo")));
    replacedert.replacedertTrue("Renamed dest should  exist as dir", isDir(fcView, fileContextTestHelper.getTestRootPath(fcView, "/user/dirFooBar")));
    replacedert.replacedertTrue("Renamed dest should  exist as dir in target", isDir(fcTarget, new Path(targetTestRoot, "user/dirFooBar")));
    // Make a directory under a directory that's mounted from the root of another FS
    fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false);
    replacedert.replacedertTrue(exists(fcView, new Path("/targetRoot/dirFoo")));
    boolean dirFooPresent = false;
    RemoteIterator<FileStatus> dirContents = fcView.listStatus(new Path("/targetRoot/"));
    while (dirContents.hasNext()) {
        FileStatus fileStatus = dirContents.next();
        if (fileStatus.getPath().getName().equals("dirFoo")) {
            dirFooPresent = true;
        }
    }
    Assert.assertTrue(dirFooPresent);
}

14 Source : ViewFileSystem.java
with Apache License 2.0
from NJUJYB

@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f, final PathFilter filter) throws FileNotFoundException, IOException {
    final InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true);
    final RemoteIterator<LocatedFileStatus> statusIter = res.targetFileSystem.listLocatedStatus(res.remainingPath);
    if (res.isInternalDir()) {
        return statusIter;
    }
    return new RemoteIterator<LocatedFileStatus>() {

        @Override
        public boolean hasNext() throws IOException {
            return statusIter.hasNext();
        }

        @Override
        public LocatedFileStatus next() throws IOException {
            final LocatedFileStatus status = statusIter.next();
            return (LocatedFileStatus) fixFileStatus(status, getChrootedPath(res, status, f));
        }
    };
}

14 Source : RecordingsAgent.java
with Apache License 2.0
from lealone

public List<RecordingInfo> getRecordingInfo(String appId) {
    List<RecordingInfo> result = new ArrayList<>();
    String dir = getRecordingsDirectory(appId);
    if (dir == null) {
        return result;
    }
    Path path = new Path(dir);
    try {
        FileStatus fileStatus = stramAgent.getFileSystem().getFileStatus(path);
        if (!fileStatus.isDirectory()) {
            return result;
        }
        RemoteIterator<LocatedFileStatus> ri = stramAgent.getFileSystem().listLocatedStatus(path);
        while (ri.hasNext()) {
            LocatedFileStatus lfs = ri.next();
            if (lfs.isDirectory()) {
                try {
                    String opId = lfs.getPath().getName();
                    result.addAll(getRecordingInfo(appId, opId));
                } catch (NumberFormatException ex) {
                // ignore
                }
            }
        }
    } catch (IOException ex) {
        LOG.warn("Cannot get recording info for app id {}: {}", appId, ex);
        return result;
    }
    return result;
}

14 Source : RecordingsAgent.java
with Apache License 2.0
from lealone

private List<RecordingInfo> getRecordingInfoHelper(String appId, String opId, Set<String> containers) {
    List<RecordingInfo> result = new ArrayList<>();
    String dir = getRecordingsDirectory(appId, opId);
    if (dir == null) {
        return result;
    }
    Path path = new Path(dir);
    try {
        FileStatus fileStatus = stramAgent.getFileSystem().getFileStatus(path);
        if (!fileStatus.isDirectory()) {
            return result;
        }
        RemoteIterator<LocatedFileStatus> ri = stramAgent.getFileSystem().listLocatedStatus(path);
        while (ri.hasNext()) {
            LocatedFileStatus lfs = ri.next();
            if (lfs.isDirectory()) {
                try {
                    String id = lfs.getPath().getName();
                    RecordingInfo recordingInfo = getRecordingInfoHelper(appId, opId, id, containers);
                    if (recordingInfo != null) {
                        result.add(recordingInfo);
                    }
                } catch (NumberFormatException ex) {
                // ignore
                }
            }
        }
    } catch (IOException ex) {
        LOG.warn("Cannot get recording info for app id {}: {}", appId, ex);
        return result;
    }
    return result;
}

14 Source : HdfsImpl.java
with MIT License
from heisedebaise

@Override
public List<String> list(String path, boolean recursive) {
    List<String> list = new ArrayList<>();
    if (isDisabled())
        return list;
    try {
        for (RemoteIterator<LocatedFileStatus> iterator = getFileSystem().listFiles(new Path(path), recursive); iterator.hasNext(); ) list.add(iterator.next().getPath().toUri().getPath());
    } catch (IOException e) {
        logger.warn(e, "列出HDFS文件[{}]时发生异常!", path);
    }
    return list;
}

14 Source : ProtoParquetWriterWithOffsetTest.java
with Apache License 2.0
from criteo

private Set<LocatedFileStatus> listFiles(FileSystem fs, Path p) throws IOException {
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(p, true);
    Set<LocatedFileStatus> s = new HashSet<>();
    while (it.hasNext()) {
        s.add(it.next());
    }
    return s;
}

14 Source : FileSystemTestUtils.java
with Apache License 2.0
from apache

public static List<FileStatus> listRecursive(FileSystem fs, Path path) throws IOException {
    RemoteIterator<LocatedFileStatus> itr = fs.listFiles(path, true);
    List<FileStatus> statuses = new ArrayList<>();
    while (itr.hasNext()) {
        statuses.add(itr.next());
    }
    return statuses;
}

14 Source : FileContextLocation.java
with Apache License 2.0
from apache

@Override
public List<Location> list() throws IOException {
    RemoteIterator<FileStatus> statuses = fc.listStatus(path);
    ImmutableList.Builder<Location> result = ImmutableList.builder();
    while (statuses.hasNext()) {
        FileStatus status = statuses.next();
        if (!Objects.equals(path, status.getPath())) {
            result.add(new FileContextLocation(locationFactory, fc, status.getPath()));
        }
    }
    return result.build();
}

13 Source : LifecycleIT.java
with Apache License 2.0
from spotify

private int getFileCount(URI uri) throws IOException {
    FileSystem fs = gcpUtils.fileSystemForUri(uri);
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(uri), true);
    int count = 0;
    while (it.hasNext()) {
        it.next();
        count++;
    }
    return count;
}

13 Source : ApplicationDriver.java
with GNU Lesser General Public License v3.0
from schic

private List<String> buildJarFiles(final MutableRef<String> primaryJarRef) throws IOException {
    final List<String> list = new ArrayList<>();
    final Path directoryPath = new Path(_jarDirectoryPath);
    final RemoteIterator<LocatedFileStatus> files = _fileSystem.listFiles(directoryPath, false);
    while (files.hasNext()) {
        final LocatedFileStatus file = files.next();
        final Path path = file.getPath();
        final String filename = path.getName();
        boolean primaryJar = false;
        for (final String prefix : PRIMARY_JAR_FILENAME_PREFIXES) {
            if (filename.startsWith(prefix)) {
                primaryJarRef.set(path.toString());
                primaryJar = true;
                break;
            }
        }
        if (!primaryJar) {
            list.add(path.toString());
        }
    }
    if (primaryJarRef.get() == null) {
        throw new IllegalArgumentException("Failed to find primary jar (starting with '" + PRIMARY_JAR_FILENAME_PREFIXES[0] + "') in JAR file directory: " + _jarDirectoryPath);
    }
    return list;
}

13 Source : TestV2LsOperations.java
with Apache License 2.0
from NJUJYB

/**
 * To get this project to compile under Hadoop 1, this code needs to be
 * commented out
 *
 * @param fs filesystem
 * @param dir dir
 * @param subdir subdir
 * @param recursive recurse?
 * @throws IOException IO problems
 */
public static void assertListFilesFinds(FileSystem fs, Path dir, Path subdir, boolean recursive) throws IOException {
    RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(dir, recursive);
    boolean found = false;
    int entries = 0;
    StringBuilder builder = new StringBuilder();
    while (iterator.hasNext()) {
        LocatedFileStatus next = iterator.next();
        entries++;
        builder.append(next.toString()).append('\n');
        if (next.getPath().equals(subdir)) {
            found = true;
        }
    }
    replacedertTrue("Path " + subdir + " not found in directory " + dir + " : " + " entries=" + entries + " content" + builder.toString(), found);
}

13 Source : TestRetryCacheWithHA.java
with Apache License 2.0
from NJUJYB

@SuppressWarnings("unchecked")
private void listCachePools(HashSet<String> poolNames, int active) throws Exception {
    HashSet<String> tmpNames = (HashSet<String>) poolNames.clone();
    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
    int poolCount = poolNames.size();
    for (int i = 0; i < poolCount; i++) {
        CachePoolEntry pool = pools.next();
        String pollName = pool.getInfo().getPoolName();
        replacedertTrue("The pool name should be expected", tmpNames.remove(pollName));
        if (i % 2 == 0) {
            int standby = active;
            active = (standby == 0) ? 1 : 0;
            cluster.transitionToStandby(standby);
            cluster.transitionToActive(active);
            cluster.waitActive(active);
        }
    }
    replacedertTrue("All pools must be found", tmpNames.isEmpty());
}

13 Source : HistoryFileManager.java
with Apache License 2.0
from naver

@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc, PathFilter pathFilter) throws IOException {
    path = fc.makeQualified(path);
    List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
    try {
        RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
        while (fileStatusIter.hasNext()) {
            FileStatus fileStatus = fileStatusIter.next();
            Path filePath = fileStatus.getPath();
            if (fileStatus.isFile() && pathFilter.accept(filePath)) {
                jhStatusList.add(fileStatus);
            }
        }
    } catch (FileNotFoundException fe) {
        LOG.error("Error while scanning directory " + path, fe);
    }
    return jhStatusList;
}

13 Source : Hdfs.java
with Apache License 2.0
from greenplum-db

@Override
public ArrayList<String> list(String path) throws Exception {
    ReportUtils.startLevel(report, getClass(), "List From " + path);
    RemoteIterator<LocatedFileStatus> list = fs.listFiles(getDatapath(path), true);
    ArrayList<String> filesList = new ArrayList<>();
    while (list.hasNext()) {
        filesList.add(list.next().getPath().toString());
    }
    ReportUtils.report(report, getClass(), filesList.toString());
    ReportUtils.stopLevel(report);
    return filesList;
}

13 Source : HDFSEasy.java
with Apache License 2.0
from apache

public List<LocatedFileStatus> listFiles(Path path) throws IOException {
    RemoteIterator<LocatedFileStatus> i = dfs.listFiles(path, false);
    List<LocatedFileStatus> retList = new ArrayList<>();
    while (i.hasNext()) {
        LocatedFileStatus locatedFileStatus = i.next();
        retList.add(locatedFileStatus);
    }
    return retList;
}

12 Source : SegmentHelper.java
with Apache License 2.0
from shunfei

public static void literalAllSegments(FileSystem fileSystem, Path dir, Consumer<LocatedFileStatus> consumer) throws IOException {
    RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(dir, true);
    while (files.hasNext()) {
        LocatedFileStatus fileStatus = files.next();
        if (!fileStatus.isFile()) {
            continue;
        }
        if (fileStatus.getLen() == 0) {
            continue;
        }
        Path path = fileStatus.getPath();
        if (checkSegmentByPath(path)) {
            consumer.accept(fileStatus);
        }
    }
}

12 Source : CachingDirectoryLister.java
with Apache License 2.0
from openlookeng

@Override
public RemoteIterator<LocatedFileStatus> list(FileSystem fs, Table table, Path path) throws IOException {
    List<LocatedFileStatus> files = cache.getIfPresent(path);
    if (files != null) {
        return simpleRemoteIterator(files);
    }
    RemoteIterator<LocatedFileStatus> iterator = fs.listLocatedStatus(path);
    if (!tableNames.contains(table.getSchemaTableName())) {
        return iterator;
    }
    return cachingRemoteIterator(iterator, path);
}

12 Source : AbstractFlagConfig.java
with Apache License 2.0
from NationalSecurityAgency

protected Path getTestFile(FileSystem fs) throws IOException {
    createTestFiles(1, 1);
    Path file = null;
    for (RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(this.fmc.getBaseHDFSDir()), true); it.hasNext(); ) {
        LocatedFileStatus status = it.next();
        if (status.isFile()) {
            file = status.getPath();
            break;
        }
    }
    return file;
}

12 Source : HDFSResourceStore.java
with Apache License 2.0
from Kyligence

TreeSet<String> getAllFilePath(Path filePath, String resPathPrefix) throws IOException {
    String fsPathPrefix = filePath.toUri().getPath();
    TreeSet<String> fileList = new TreeSet<>();
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(filePath, true);
    while (it.hasNext()) {
        String path = it.next().getPath().toUri().getPath();
        if (!path.startsWith(fsPathPrefix))
            throw new IllegalStateException("File path " + path + " is supposed to start with " + fsPathPrefix);
        String resPath = resPathPrefix + path.substring(fsPathPrefix.length() + 1);
        fileList.add(resPath);
    }
    return fileList;
}

12 Source : HDFSTest.java
with Apache License 2.0
from junneyang

@Test
public void testList() throws FileNotFoundException, IOException {
    Path f = new Path("/");
    RemoteIterator<LocatedFileStatus> files = fs.listFiles(f, true);
    while (files.hasNext()) {
        LocatedFileStatus file = (LocatedFileStatus) files.next();
        LOGGER.info("====={}", file.getPath());
    }
}

12 Source : TestCompressDecompress.java
with Apache License 2.0
from Intel-bigdata

@Test
public void testListLocatedStatus() throws Exception {
    // if (!loadedNative()) {
    // return;
    // }
    waitTillSSMExitSafeMode();
    // initDB();
    SmartFileSystem smartDfs = new SmartFileSystem();
    smartDfs.initialize(dfs.getUri(), ssm.getContext().getConf());
    int arraySize = 1024 * 1024 * 8;
    String fileName = "/ssm/compression/file4";
    byte[] bytes = prepareFile(fileName, arraySize);
    // For uncompressed file, SmartFileSystem and DistributedFileSystem behave exactly the same
    RemoteIterator<LocatedFileStatus> iter1 = dfs.listLocatedStatus(new Path(fileName));
    LocatedFileStatus stat1 = iter1.next();
    RemoteIterator<LocatedFileStatus> iter2 = smartDfs.listLocatedStatus(new Path(fileName));
    LocatedFileStatus stat2 = iter2.next();
    Assert.assertEquals(stat1.getPath(), stat2.getPath());
    Assert.assertEquals(stat1.getBlockSize(), stat2.getBlockSize());
    Assert.assertEquals(stat1.getLen(), stat2.getLen());
    BlockLocation[] blockLocations1 = stat1.getBlockLocations();
    BlockLocation[] blockLocations2 = stat2.getBlockLocations();
    Assert.assertEquals(blockLocations1.length, blockLocations2.length);
    for (int i = 0; i < blockLocations1.length; i++) {
        Assert.assertEquals(blockLocations1[i].getLength(), blockLocations2[i].getLength());
        Assert.assertEquals(blockLocations1[i].getOffset(), blockLocations2[i].getOffset());
    }
    // Test compressed file
    int bufSize = 1024 * 1024;
    CmdletManager cmdletManager = ssm.getCmdletManager();
    long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName + " -bufSize " + bufSize + " -codec " + codec);
    waitTillActionDone(cmdId);
    RemoteIterator<LocatedFileStatus> iter3 = dfs.listLocatedStatus(new Path(fileName));
    LocatedFileStatus stat3 = iter3.next();
    BlockLocation[] blockLocations3 = stat3.getBlockLocations();
    RemoteIterator<LocatedFileStatus> iter4 = smartDfs.listLocatedStatus(new Path(fileName));
    LocatedFileStatus stat4 = iter4.next();
    BlockLocation[] blockLocations4 = stat4.getBlockLocations();
    Assert.assertEquals(stat1.getPath(), stat4.getPath());
    Assert.assertEquals(stat1.getBlockSize(), stat4.getBlockSize());
    Assert.assertEquals(stat1.getLen(), stat4.getLen());
}

12 Source : CacheScheduler.java
with Apache License 2.0
from Intel-bigdata

/**
 * For cache acton that going through cache scheduler, SSM cache pool will
 * be checked and created.
 * @param dfsClient
 * @throws IOException
 */
public static void createCachePool(DFSClient dfsClient) throws IOException {
    RemoteIterator<CachePoolEntry> poolEntries = dfsClient.listCachePools();
    while (poolEntries.hasNext()) {
        CachePoolEntry poolEntry = poolEntries.next();
        if (poolEntry.getInfo().getPoolName().equals(SSM_POOL)) {
            return;
        }
    }
    dfsClient.addCachePool(new CachePoolInfo(SSM_POOL));
}

12 Source : HadoopFileSystemUtils.java
with GNU General Public License v3.0
from icgc-dcc

private static List<LocatedFileStatus> getFiles(FileSystem fileSystem, Path target, boolean recurse) {
    val results = Lists.<LocatedFileStatus>newArrayList();
    RemoteIterator<LocatedFileStatus> fileStatusListIterator = null;
    try {
        fileStatusListIterator = fileSystem.listFiles(target, true);
        while (fileStatusListIterator.hasNext()) {
            LocatedFileStatus fileStatus = fileStatusListIterator.next();
            results.add(fileStatus);
        }
    } catch (IOException e) {
        log.info("Error retrieving files in path '{}'", target);
    }
    return results;
}

12 Source : HadoopFileSystem.java
with Apache License 2.0
from DSC-SPIDAL

/**
 * List the statuses of the files/directories in the given path if the path is
 * a directory.
 *
 * @param f given path
 * @return the statuses of the files/directories in the given patch
 */
@Override
public FileStatus[] listFiles(Path f) throws IOException {
    RemoteIterator<LocatedFileStatus> listFiles = this.hadoopFileSystem.listFiles(toHadoopPath(f), true);
    List<FileStatus> statusList = new ArrayList<>();
    while (listFiles.hasNext()) {
        LocatedFileStatus next = listFiles.next();
        FileStatus status = new HadoopFileStatus(next);
        statusList.add(status);
    }
    return statusList.toArray(new FileStatus[0]);
}

12 Source : RemoteNodeFileSystem.java
with Apache License 2.0
from dremio

@Override
public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException {
    RemoteIterator<FileStatus> remoteIterator = listStatusIterator(f);
    List<FileStatus> statuses = new ArrayList<>();
    while (remoteIterator.hasNext()) {
        statuses.add(remoteIterator.next());
    }
    return statuses.toArray(new FileStatus[0]);
}

12 Source : PDFSProtocol.java
with Apache License 2.0
from dremio

private Response handle(PhysicalConnection connection, DFS.ListStatusRequest request) throws IOException {
    final RemoteIterator<FileStatus> iterator;
    if (request.hasHandle()) {
        final ListStatusContinuationHandle handle = request.getHandle();
        iterator = openIterators.getIfPresent(handle);
        if (iterator == null) {
            throw new IOException(format("No iterator found for handle %s/path %s. Maybe it expired?", handle, request.getPath()));
        }
        // invalidate the previous handle as a new one will be created if needed
        openIterators.invalidate(handle);
    } else {
        Preconditions.checkArgument(request.hasPath(), "No path argument provided for listStatus.");
        Path path = new Path(request.getPath());
        iterator = localFS.listStatusIterator(path);
    }
    final DFS.ListStatusResponse.Builder response = DFS.ListStatusResponse.newBuilder();
    try {
        // Only return as much as {limit} results (or all of them if no limit)
        for (int i = 0; iterator.hasNext() && (!request.hasLimit() || i < request.getLimit()); i++) {
            response.addStatuses(RemoteNodeFileSystem.toProtoFileStatus(iterator.next()));
        }
        // Check if more results are available
        if (iterator.hasNext()) {
            ListStatusContinuationHandle handle = ListStatusContinuationHandle.newBuilder().setId(UUID.randomUUID().toString()).build();
            openIterators.put(handle, iterator);
            response.setHandle(handle);
        }
    } finally {
        // If response has no handle (because not enough results or exception)
        // make sure to close the iterator
        if (!response.hasHandle() && iterator instanceof Closeable) {
            ((Closeable) iterator).close();
        }
    }
    return reply(DFS.RpcType.LIST_STATUS_RESPONSE, response.build());
}

11 Source : HdfsIOBenchmark.java
with Apache License 2.0
from zrlio

void enumerateDir() throws Exception {
    System.out.println("enumarate dir, path " + path);
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    int repfactor = 4;
    for (int k = 0; k < repfactor; k++) {
        long start = System.currentTimeMillis();
        for (int i = 0; i < size; i++) {
            // single operation == loop
            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(path, false);
            while (iter.hasNext()) {
                iter.next();
            }
        }
        long end = System.currentTimeMillis();
        double executionTime = ((double) (end - start));
        double latency = executionTime * 1000.0 / ((double) size);
        System.out.println("execution time [ms] " + executionTime);
        System.out.println("latency [us] " + latency);
    }
    fs.close();
}

11 Source : TestCTTAS.java
with Apache License 2.0
from zpochen

private void checkPermission(String tmpTableName) throws IOException {
    List<Path> matchingPath = findTemporaryTableLocation(tmpTableName);
    replacedertEquals("Only one directory should match temporary table name " + tmpTableName, 1, matchingPath.size());
    Path tmpTablePath = matchingPath.get(0);
    replacedertEquals("Directory permission should match", expectedFolderPermission, fs.getFileStatus(tmpTablePath).getPermission());
    RemoteIterator<LocatedFileStatus> fileIterator = fs.listFiles(tmpTablePath, false);
    while (fileIterator.hasNext()) {
        replacedertEquals("File permission should match", expectedFilePermission, fileIterator.next().getPermission());
    }
}

11 Source : Cloudup.java
with Apache License 2.0
from steveloughran

/**
 * List the source files and build the list.
 * @return list of uploads
 * @throws IOException failure to list
 */
private List<UploadEntry> createUploadList() throws IOException {
    List<UploadEntry> uploads = new ArrayList<>();
    RemoteIterator<LocatedFileStatus> ri = sourceFS.listFiles(sourcePath, true);
    while (ri.hasNext()) {
        LocatedFileStatus status = ri.next();
        UploadEntry entry = new UploadEntry(status);
        entry.setDest(getFinalPath(status.getPath()));
        uploads.add(entry);
    }
    LOG.info("List {}", ri);
    if (ri instanceof Closeable) {
        ((Closeable) ri).close();
    }
    return uploads;
}
