org.apache.hadoop.fs.Path.getName()

Here are examples of the Java API org.apache.hadoop.fs.Path.getName(), taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

229 Examples
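
Before the examples, a minimal sketch of what the method does (assuming only hadoop-common on the classpath): getName() returns the final component of the path, with the directory part, scheme, and authority stripped.

import org.apache.hadoop.fs.Path;

public class PathGetNameDemo {

    public static void main(String[] args) {
        // The final path component is returned, regardless of scheme/authority.
        System.out.println(new Path("hdfs://nn:8020/user/data/part-00000.gz").getName()); // part-00000.gz
        System.out.println(new Path("/tmp/dir").getName()); // dir
        // The root path has no final component, so the result is an empty string.
        System.out.println(new Path("/").getName()); // ""
    }
}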

19 Source : InputFile.java
with Apache License 2.0
from NationalSecurityAgency

public String getFileName() {
    return path.getName();
}

18 Source : AthenaXYarnClusterDescriptor.java
with Apache License 2.0
from uber-archive

private void collectLocalResources(Map<String, LocalResource> resources, Set<Path> shippedPaths) throws IOException {
    for (Path p : clusterConf.resourcesToLocalize()) {
        resources.put(p.getName(), toLocalResource(p, LocalResourceVisibility.APPLICATION));
    }
    for (Path p : Iterables.concat(clusterConf.systemJars(), job.userProvidedJars())) {
        String name = p.getName();
        if (resources.containsKey(name)) {
            throw new IllegalArgumentException("Duplicated name in the shipped files " + p);
        }
        resources.put(name, toLocalResource(p, LocalResourceVisibility.APPLICATION));
        shippedPaths.add(p);
    }
}
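
The duplicate check above is needed because getName() drops the directory part, so two distinct paths can collide on their basename. A small illustration (the jar paths are made up):

import org.apache.hadoop.fs.Path;

public class BasenameCollision {

    public static void main(String[] args) {
        Path a = new Path("/libs/common/util.jar");
        Path b = new Path("/libs/legacy/util.jar");
        // Different paths, identical basenames -- exactly the collision that the
        // IllegalArgumentException above guards against.
        System.out.println(a.getName().equals(b.getName())); // true
    }
}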

18 Source : BackgroundHiveSplitLoader.java
with Apache License 2.0
from trinodb

private static int getRequiredBucketNumber(Path path) {
    return getBucketNumber(path.getName()).orElseThrow(() -> new IllegalStateException("Cannot get bucket number from path: " + path));
}

18 Source : ApplicationDriver.java
with GNU Lesser General Public License v3.0
from schic

private List<String> buildJarFiles(final MutableRef<String> primaryJarRef) throws IOException {
    final List<String> list = new ArrayList<>();
    final Path directoryPath = new Path(_jarDirectoryPath);
    final RemoteIterator<LocatedFileStatus> files = _fileSystem.listFiles(directoryPath, false);
    while (files.hasNext()) {
        final LocatedFileStatus file = files.next();
        final Path path = file.getPath();
        final String filename = path.getName();
        boolean primaryJar = false;
        for (final String prefix : PRIMARY_JAR_FILENAME_PREFIXES) {
            if (filename.startsWith(prefix)) {
                primaryJarRef.set(path.toString());
                primaryJar = true;
                break;
            }
        }
        if (!primaryJar) {
            list.add(path.toString());
        }
    }
    if (primaryJarRef.get() == null) {
        throw new IllegalArgumentException("Failed to find primary jar (starting with '" + PRIMARY_JAR_FILENAME_PREFIXES[0] + "') in JAR file directory: " + _jarDirectoryPath);
    }
    return list;
}

18 Source : MockFileSystem.java
with Apache License 2.0
from pravega

private FileData getFileData(Path f) throws IOException {
    synchronized (this.files) {
        FileData data = this.files.getOrDefault(f, null);
        if (data == null) {
            throw HDFSExceptionHelpers.segmentNotExistsException(f.getName());
        }
        return data;
    }
}

18 Source : HDFSStorage.java
with Apache License 2.0
from pravega

private static String getSegmentNameFromPath(Path path) throws FileNameFormatException {
    String fileName = path.getName();
    int pos2 = fileName.lastIndexOf(PART_SEPARATOR);
    if (pos2 <= 0) {
        throw new FileNameFormatException(fileName, "File must be in the following format: " + EXAMPLE_NAME_FORMAT);
    }
    return fileName.substring(0, pos2);
}

18 Source : HCFSJniFuseFileSystem.java
with Apache License 2.0
from opendataio

private int renameInternal(String oldPath, String newPath) {
    final Path oldUri = mPathResolverCache.getUnchecked(oldPath);
    final Path newUri = mPathResolverCache.getUnchecked(newPath);
    final String name = newUri.getName();
    if (name.length() > MAX_NAME_LENGTH) {
        LOG.error("Failed to rename {} to {}, name {} is longer than {} characters", oldPath, newPath, name, MAX_NAME_LENGTH);
        return -ErrorCodes.ENAMETOOLONG();
    }
    try {
        mFileSystem.rename(oldUri, newUri);
    } catch (Throwable e) {
        LOG.error("Failed to rename {} to {}: ", oldPath, newPath, e);
        return -ErrorCodes.EIO();
    }
    return 0;
}

18 Source : HCFSFuseFileSystem.java
with Apache License 2.0
from opendataio

/**
 * Renames a path.
 *
 * @param oldPath the source path in the FS
 * @param newPath the destination path in the FS
 * @return 0 on success, a negative value on error
 */
@Override
public int rename(String oldPath, String newPath) {
    final Path oldUri = mPathResolverCache.getUnchecked(oldPath);
    final Path newUri = mPathResolverCache.getUnchecked(newPath);
    final String name = newUri.getName();
    LOG.trace("rename({}, {}) [target: {}, {}]", oldPath, newPath, oldUri, newUri);
    if (name.length() > MAX_NAME_LENGTH) {
        LOG.error("Failed to rename {} to {}, name {} is longer than {} characters", oldPath, newPath, name, MAX_NAME_LENGTH);
        return -ErrorCodes.ENAMETOOLONG();
    }
    try {
        mFileSystem.rename(oldUri, newUri);
        OpenFileEntry oe = mOpenFiles.getFirstByField(PATH_INDEX, oldPath);
        if (oe != null) {
            oe.setPath(newPath);
        }
    } catch (FileNotFoundException e) {
        LOG.debug("Failed to rename {} to {}, file {} does not exist", oldPath, newPath, oldPath);
        return -ErrorCodes.ENOENT();
    } catch (FileAlreadyExistsException e) {
        LOG.debug("Failed to rename {} to {}, file {} already exists", oldPath, newPath, newPath);
        return -ErrorCodes.EEXIST();
    } catch (Throwable t) {
        LOG.error("Failed to rename {} to {}", oldPath, newPath, t);
        return AlluxioFuseUtils.getErrorCode(t);
    }
    return 0;
}

18 Source : FileSystemRMStateStore.java
with Apache License 2.0
from NJUJYB

private boolean checkAndRemovePartialRecord(Path record) throws IOException {
    // If the file ends with .tmp then it shows that it failed
    // during saving state into state store. The file will be deleted as a
    // part of this call
    if (record.getName().endsWith(".tmp")) {
        LOG.error("incomplete rm state store entry found :" + record);
        fs.delete(record, false);
        return true;
    }
    return false;
}

18 Source : LocalCacheDirectoryManager.java
with Apache License 2.0
from NJUJYB

/**
 * Given a path to a directory within a local cache tree, return the
 * root of the cache directory.
 *
 * @param path the directory within a cache directory
 * @return the local cache directory root or null if not found
 */
public static Path getCacheDirectoryRoot(Path path) {
    while (path != null) {
        String name = path.getName();
        if (name.length() != 1) {
            return path;
        }
        int dirnum = DIRECTORIES_PER_LEVEL;
        try {
            dirnum = Integer.parseInt(name, DIRECTORIES_PER_LEVEL);
        } catch (NumberFormatException e) {
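            // name is a single character but not numeric in this radix; dirnum
            // stays at the DIRECTORIES_PER_LEVEL limit, so the check below
            // returns this path as the root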
        }
        if (dirnum >= DIRECTORIES_PER_LEVEL) {
            return path;
        }
        path = path.getParent();
    }
    return path;
}
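
The loop above climbs the tree with getParent(), which eventually returns null above the root. A standalone sketch of the same ancestor walk (the cache path is made up):

import org.apache.hadoop.fs.Path;

public class AncestorWalk {

    public static void main(String[] args) {
        Path p = new Path("/cache/0/1/resource");
        // getParent() returns null once the root has been visited, which is
        // what terminates loops like the one in getCacheDirectoryRoot().
        while (p != null) {
            System.out.println(p + " -> name: '" + p.getName() + "'");
            p = p.getParent();
        }
    }
}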

18 Source : CopyMapper.java
with Apache License 2.0
from NJUJYB

/**
 * Find entry from distributed cache
 *
 * @param cacheFiles - All localized cache files
 * @param fileName - fileName to search
 * @return Path of the filename if found, else null
 */
private Path findCacheFile(Path[] cacheFiles, String fileName) {
    if (cacheFiles != null && cacheFiles.length > 0) {
        for (Path file : cacheFiles) {
            if (file.getName().equals(fileName)) {
                return file;
            }
        }
    }
    return null;
}

18 Source : HistoryFileManager.java
with Apache License 2.0
from NJUJYB

private void addDirectoryToSerialNumberIndex(Path serialDirPath) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Adding " + serialDirPath + " to serial index");
    }
    String serialPart = serialDirPath.getName();
    String timestampPart = JobHistoryUtils.getTimestampPartFromPath(serialDirPath.toString());
    if (timestampPart == null) {
        LOG.warn("Could not find timestamp portion from path: " + serialDirPath + ". Continuing with next");
        return;
    }
    if (serialPart == null) {
        LOG.warn("Could not find serial portion from path: " + serialDirPath.toString() + ". Continuing with next");
    } else {
        serialNumberIndex.add(serialPart, timestampPart);
    }
}

18 Source : HistoryFileManager.java
with Apache License 2.0
from NJUJYB

private void removeDirectoryFromSerialNumberIndex(Path serialDirPath) {
    String serialPart = serialDirPath.getName();
    String timeStampPart = JobHistoryUtils.getTimestampPartFromPath(serialDirPath.toString());
    if (timeStampPart == null) {
        LOG.warn("Could not find timestamp portion from path: " + serialDirPath.toString() + ". Continuing with next");
        return;
    }
    if (serialPart == null) {
        LOG.warn("Could not find serial portion from path: " + serialDirPath.toString() + ". Continuing with next");
        return;
    }
    serialNumberIndex.remove(serialPart, timeStampPart);
}

18 Source : TestSnapshotPathINodes.java
with Apache License 2.0
from NJUJYB

static void assertINodeFile(INode inode, Path path) {
    assertEquals(path.getName(), inode.getLocalName());
    assertEquals(INodeFile.class, inode.getClass());
}

18 Source : TestSnapshotBlocksMap.java
with Apache License 2.0
from NJUJYB

void assertINodeNullInSnapshots(Path path, String... snapshots) throws Exception {
    for (String s : snapshots) {
        assertINodeNull(SnapshotTestHelper.getSnapshotPath(path.getParent(), s, path.getName()).toString());
    }
}
    }
}

18 Source : CompressionCodecFactory.java
with Apache License 2.0
from NJUJYB

/**
 * Find the relevant compression codec for the given file based on its
 * filename suffix.
 * @param file the filename to check
 * @return the codec object
 */
public CompressionCodec getCodec(Path file) {
    CompressionCodec result = null;
    if (codecs != null) {
        String filename = file.getName();
        String reversedFilename = new StringBuilder(filename).reverse().toString();
        SortedMap<String, CompressionCodec> subMap = codecs.headMap(reversedFilename);
        if (!subMap.isEmpty()) {
            String potentialSuffix = subMap.lastKey();
            if (reversedFilename.startsWith(potentialSuffix)) {
                result = codecs.get(potentialSuffix);
            }
        }
    }
    return result;
}
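
The reversed-filename trick above turns suffix matching on the filename into prefix matching on a sorted map's keys. A self-contained sketch of the same idea, with plain strings standing in for codec classes:

import java.util.SortedMap;
import java.util.TreeMap;

public class SuffixLookup {

    public static void main(String[] args) {
        // Keys are *reversed* suffixes, so a filename suffix becomes a prefix
        // of the reversed filename and headMap()/lastKey() can find it.
        SortedMap<String, String> codecs = new TreeMap<>();
        codecs.put(new StringBuilder(".gz").reverse().toString(), "GzipCodec");
        codecs.put(new StringBuilder(".bz2").reverse().toString(), "BZip2Codec");

        String filename = "events.log.gz";
        String reversed = new StringBuilder(filename).reverse().toString();
        String result = null;
        SortedMap<String, String> subMap = codecs.headMap(reversed);
        if (!subMap.isEmpty()) {
            String potentialSuffix = subMap.lastKey();
            if (reversed.startsWith(potentialSuffix)) {
                result = subMap.get(potentialSuffix);
            }
        }
        System.out.println(result); // GzipCodec
    }
}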

18 Source : PathData.java
with Apache License 2.0
from NJUJYB

/**
 * Given a child of this directory, use the directory's path and the child's
 * basename to construct the string path to the child.  This preserves relative
 * paths, since constructing a Path directly would fully qualify them.
 * @param childPath a path contained within this directory
 * @return String of the path relative to this directory
 */
private String getStringForChildPath(Path childPath) {
    String basename = childPath.getName();
    if (Path.CUR_DIR.equals(toString())) {
        return basename;
    }
    // check getPath() so scheme slashes aren't considered part of the path
    String separator = uri.getPath().endsWith(Path.SEPARATOR) ? "" : Path.SEPARATOR;
    return uriToString(uri, inferredSchemeFromPath) + separator + basename;
}

18 Source : TestSnapshotPathINodes.java
with Apache License 2.0
from naver

/**
 * for snapshot file after deleting the original file.
 */
@Test(timeout = 15000)
public void testSnapshotPathINodesAfterDeletion() throws Exception {
    // Create a snapshot for the dir, and check the inodes for the path
    // pointing to a snapshot file
    hdfs.allowSnapshot(sub1);
    hdfs.createSnapshot(sub1, "s2");
    // Delete the original file /TestSnapshot/sub1/file1
    hdfs.delete(file1, false);
    final Snapshot snapshot;
    {
        // Resolve the path for the snapshot file
        // /TestSnapshot/sub1/.snapshot/s2/file1
        String snapshotPath = sub1.toString() + "/.snapshot/s2/file1";
        String[] names = INode.getPathNames(snapshotPath);
        byte[][] components = INode.getPathComponents(names);
        INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
        // Length of inodes should be (components.length - 1), since we will ignore
        // ".snapshot"
        assertEquals(nodesInPath.length(), components.length - 1);
        // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s2, file1}
        snapshot = getSnapshot(nodesInPath, "s2", 3);
        assertSnapshot(nodesInPath, true, snapshot, 3);
        // Check the INode for file1 (snapshot file)
        final INode inode = nodesInPath.getLastINode();
        assertEquals(file1.getName(), inode.getLocalName());
        assertTrue(inode.asFile().isWithSnapshot());
    }
    // Check the INodes for path /TestSnapshot/sub1/file1
    String[] names = INode.getPathNames(file1.toString());
    byte[][] components = INode.getPathComponents(names);
    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
    // The length of inodes should be equal to components.length
    assertEquals(nodesInPath.length(), components.length);
    // The number of non-null elements should be components.length - 1 since
    // file1 has been deleted
    assertEquals(getNumNonNull(nodesInPath), components.length - 1);
    // The returned nodesInPath should be non-snapshot
    assertSnapshot(nodesInPath, false, snapshot, -1);
    // The last INode should be null, and the one before should be associated
    // with sub1
    assertNull(nodesInPath.getINode(components.length - 1));
    assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(), sub1.toString());
    assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(), dir.toString());
    hdfs.deleteSnapshot(sub1, "s2");
    hdfs.disallowSnapshot(sub1);
}

18 Source : TestSnapshotPathINodes.java
with Apache License 2.0
from naver

/**
 * for snapshot file while modifying file after snapshot.
 */
@Test(timeout = 15000)
public void testSnapshotPathINodesAfterModification() throws Exception {
    // First check the INode for /TestSnapshot/sub1/file1
    String[] names = INode.getPathNames(file1.toString());
    byte[][] components = INode.getPathComponents(names);
    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
    // The number of inodes should be equal to components.length
    assertEquals(nodesInPath.length(), components.length);
    // The last INode should be associated with file1
    assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(), file1.toString());
    // record the modification time of the inode
    final long modTime = nodesInPath.getINode(nodesInPath.length() - 1).getModificationTime();
    // Create a snapshot for the dir, and check the inodes for the path
    // pointing to a snapshot file
    hdfs.allowSnapshot(sub1);
    hdfs.createSnapshot(sub1, "s3");
    // Modify file1
    DFSTestUtil.appendFile(hdfs, file1, "the content for appending");
    // Check the INodes for snapshot of file1
    String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
    names = INode.getPathNames(snapshotPath);
    components = INode.getPathComponents(names);
    INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
    // Length of ssInodes should be (components.length - 1), since we will
    // ignore ".snapshot"
    assertEquals(ssNodesInPath.length(), components.length - 1);
    final Snapshot s3 = getSnapshot(ssNodesInPath, "s3", 3);
    assertSnapshot(ssNodesInPath, true, s3, 3);
    // Check the INode for snapshot of file1
    INode snapshotFileNode = ssNodesInPath.getLastINode();
    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
    assertTrue(snapshotFileNode.asFile().isWithSnapshot());
    // The modification time of the snapshot INode should be the same as that of
    // the original INode before modification
    assertEquals(modTime, snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));
    // Check the INode for /TestSnapshot/sub1/file1 again
    names = INode.getPathNames(file1.toString());
    components = INode.getPathComponents(names);
    INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
    assertSnapshot(newNodesInPath, false, s3, -1);
    // The number of inodes should be equal to components.length
    assertEquals(newNodesInPath.length(), components.length);
    // The last INode should be associated with file1
    final int last = components.length - 1;
    assertEquals(newNodesInPath.getINode(last).getFullPathName(), file1.toString());
    // The modification time of the INode for file1 should have been changed
    Assert.assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime());
    hdfs.deleteSnapshot(sub1, "s3");
    hdfs.disallowSnapshot(sub1);
}

18 Source : IngestMetricsMapper.java
with Apache License 2.0
from NationalSecurityAgency

/**
 * Makes two attempts to parse a path. If the path can be parsed using the Hadoop <code>Path</code> class, then this method will create a new
 * <code>Path</code> object using the supplied path and return the value of <code>Path.getName()</code>.
 *
 * Should that fail, this method tries to locate the last '/' character and return the substring starting after the last '/' character.
 *
 * If there is no '/' character, then null is returned.
 *
 * @param path the path string to parse
 * @return the extracted file name, or null if one cannot be determined
 */
public static String extractFileName(String path) {
    // first see if we have a full path
    try {
        Path p = new Path(path);
        return p.getName();
    } catch (Exception e) {
    /* this is ok-- we can continue */
    }
    int lastSlash = path.lastIndexOf('/');
    if (lastSlash == -1) {
        return null;
    } else {
        return path.substring(lastSlash + 1);
    }
}
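
A hypothetical call sequence, assuming extractFileName() above is in scope. Note that the catch branch only fires when Path construction itself rejects the input, such as an empty string:

// Hypothetical calls; the inputs are made up.
System.out.println(extractFileName("/flagged/2020/bulk.rf")); // bulk.rf
System.out.println(extractFileName("bulk.rf"));               // bulk.rf
// new Path("") throws IllegalArgumentException, so the catch block runs;
// an empty string contains no '/', so the result is null.
System.out.println(extractFileName(""));                      // null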

18 Source : NonShardedSplitsFile.java
with Apache License 2.0
from NationalSecurityAgency

private static boolean matchesFileName(String cutFileName, Path cacheFile) {
    return cacheFile.getName().endsWith(cutFileName);
}

18 Source : MultiRFileOutputFormatter.java
with Apache License 2.0
from NationalSecurityAgency

/**
 * Insert a count into the filename. The filename is expected to end with our extension.
 *
 * @param filename the file whose name the count is inserted into
 * @param count the count to insert
 * @return filename with the count inserted as follows: {@code path/name + extension -> path/name + _count + extension}
 */
protected Path insertFileCount(Path filename, int count) {
    String name = filename.getName();
    int index = name.length() - extension.length();
    name = name.substring(0, index) + '_' + count + name.substring(index);
    return new Path(filename.getParent(), name);
}

18 Source : MultiRFileOutputFormatter.java
with Apache License 2.0
from NationalSecurityAgency

/**
 * Remove a count from a filename. The filename is expected to end with _count.extension.
 *
 * @param filename the file whose name contains a count
 * @return filename with the count removed as follows: {@code path/name + _count + extension -> path/name + extension}
 */
protected Path removeFileCount(Path filename) {
    String name = filename.getName();
    int index = name.length() - extension.length();
    int index2 = name.lastIndexOf('_', index);
    name = name.substring(0, index2) + name.substring(index);
    return new Path(filename.getParent(), name);
}
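
insertFileCount and removeFileCount above are inverses of each other. A standalone sketch of the round trip, assuming an extension of ".rf" (the real value comes from the formatter's configuration):

import org.apache.hadoop.fs.Path;

public class FileCountRoundTrip {

    public static void main(String[] args) {
        String extension = ".rf"; // assumed for illustration
        Path original = new Path("/bulk/shard/name" + extension);

        // insert: path/name + extension -> path/name + _count + extension
        String name = original.getName();
        int index = name.length() - extension.length();
        Path counted = new Path(original.getParent(), name.substring(0, index) + "_3" + name.substring(index));
        System.out.println(counted); // /bulk/shard/name_3.rf

        // remove: path/name + _count + extension -> path/name + extension
        name = counted.getName();
        index = name.length() - extension.length();
        int index2 = name.lastIndexOf('_', index);
        Path restored = new Path(counted.getParent(), name.substring(0, index2) + name.substring(index));
        System.out.println(restored); // /bulk/shard/name.rf
    }
}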

18 Source : HalyardBulkExport.java
with Apache License 2.0
from Merck

private String addTmpFile(String file) throws IOException {
    String tmpFiles = getConf().get("tmpfiles");
    Path path = new Path(new File(file).toURI());
    getConf().set("tmpfiles", tmpFiles == null ? path.toString() : tmpFiles + "," + path.toString());
    return path.getName();
}

18 Source : SubmissionFiles.java
with GNU General Public License v3.0
from icgc-dcc

public static String getProjectName(Path projectPath) {
    return projectPath.getName();
}

18 Source : DefaultWALProvider.java
with Apache License 2.0
from fengchen8086

public static boolean isMetaFile(Path p) {
    return isMetaFile(p.getName());
}

18 Source : ReplicationSourceManager.java
with Apache License 2.0
from fengchen8086

/**
 * Provide the id of the peer and a log key and this method will figure out which
 * wal it belongs to and will log, for this region server, the current
 * position. It will also clean old logs from the queue and delete older
 * entries from the replication status in zookeeper.
 * @param log Path to the log currently being replicated
 * @param id id of the peer cluster
 * @param position current location in the log
 * @param queueRecovered indicates if this queue comes from another region server
 * @param holdLogInZK if true then the log is retained in ZK
 */
public void logPositionAndCleanOldLogs(Path log, String id, long position, boolean queueRecovered, boolean holdLogInZK) {
    String fileName = log.getName();
    this.replicationQueues.setLogPosition(id, fileName, position);
    if (holdLogInZK) {
        return;
    }
    cleanOldLogs(fileName, id, queueRecovered);
}

18 Source : StoreFileInfo.java
with Apache License 2.0
from fengchen8086

/**
 * @param path Path to check.
 * @return True if the path has format of a HFile.
 */
public static boolean isHFile(final Path path) {
    return isHFile(path.getName());
}

18 Source : StoreFileInfo.java
with Apache License 2.0
from fengchen8086

/**
 * @param path Path to check.
 * @return True if the path has format of a HStoreFile reference.
 */
public static boolean isReference(final Path path) {
    return isReference(path.getName());
}

18 Source : LCIndexParameters.java
with Apache License 2.0
from fengchen8086

/**
 * LC_Home/[tableName]/[regionId]/[family]/[HFileId].lcindex
 * hdfsPath = /hdfs-hbase/[something...]/[tableName]/[regionId]/[family]/[HFileId]
 */
public Path getLocalDir(Path hdfsPath) {
    String hfileId = hdfsPath.getName();
    String family = hdfsPath.getParent().getName();
    return new Path(new Path(localRegionDir, family), hfileId + LCIndexConstant.LCINDEX_DIR_NAME);
}
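
Chaining getName() with getParent().getName() peels off the last two path components, which is how the HFile id and the family are recovered above. For example (the path is made up):

import org.apache.hadoop.fs.Path;

public class LastTwoComponents {

    public static void main(String[] args) {
        Path hdfsPath = new Path("/hbase/data/t1/region1/cf1/hfile42");
        System.out.println(hdfsPath.getName());             // hfile42
        System.out.println(hdfsPath.getParent().getName()); // cf1
    }
}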

18 Source : HRegionFileSystem.java
with Apache License 2.0
from fengchen8086

/**
 * Move the file from a build/temp location to the main family store directory.
 *
 * @param familyName      Family that will gain the file
 * @param buildPath       {@link Path} to the file to commit.
 * @param seqNum          Sequence Number to append to the file name (less then 0 if no sequence number)
 * @param generateNewName False if you want to keep the buildPath name
 * @return The new {@link Path} of the committed file
 * @throws IOException
 */
private Path commitStoreFile(final String familyName, final Path buildPath, final long seqNum, final boolean generateNewName) throws IOException {
    new IOException("this method should not be called").printStackTrace();
    Path storeDir = getStoreDir(familyName);
    if (!fs.exists(storeDir) && !createDir(storeDir))
        throw new IOException("Failed creating " + storeDir);
    String name = buildPath.getName();
    if (generateNewName) {
        name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
    }
    Path dstPath = new Path(storeDir, name);
    if (!fs.exists(buildPath)) {
        throw new FileNotFoundException(buildPath.toString());
    }
    LOG.debug("Committing store file " + buildPath + " as " + dstPath);
    // buildPath exists, therefore not doing an exists() check.
    if (!rename(buildPath, dstPath)) {
        throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
    }
    return dstPath;
}

18 Source : NamespaceUpgrade.java
with Apache License 2.0
from fengchen8086

/**
 * Migrate all tables into respective namespaces, either default or system.  We put them into
 * a temporary location, '.data', in case a user table is named 'data'.  In a later method we will
 * move stuff from .data to data.
 * @throws IOException
 */
public void migrateTables() throws IOException {
    List<String> sysTables = Lists.newArrayList("-ROOT-", ".META.", ".META");
    // Migrate tables including archive and tmp
    for (Path baseDir : baseDirs) {
        if (!fs.exists(baseDir))
            continue;
        List<Path> oldTableDirs = FSUtils.getLocalTableDirs(fs, baseDir);
        for (Path oldTableDir : oldTableDirs) {
            if (NON_USER_TABLE_DIRS.contains(oldTableDir.getName()))
                continue;
            if (sysTables.contains(oldTableDir.getName()))
                continue;
            // Make the new directory under the ns to which we will move the table.
            Path nsDir = new Path(this.defNsDir, TableName.valueOf(oldTableDir.getName()).getQualifierAsString());
            LOG.info("Moving " + oldTableDir + " to " + nsDir);
            if (!fs.exists(nsDir.getParent())) {
                if (!fs.mkdirs(nsDir.getParent())) {
                    throw new IOException("Failed to create namespace dir " + nsDir.getParent());
                }
            }
            if (sysTables.indexOf(oldTableDir.getName()) < 0) {
                LOG.info("Migrating table " + oldTableDir.getName() + " to " + nsDir);
                if (!fs.rename(oldTableDir, nsDir)) {
                    throw new IOException("Failed to move " + oldTableDir + " to namespace dir " + nsDir);
                }
            }
        }
    }
}

18 Source : HFileLink.java
with Apache License 2.0
from fengchen8086

/**
 * @param path Path to check.
 * @return True if the path is a HFileLink.
 */
public static boolean isHFileLink(final Path path) {
    return isHFileLink(path.getName());
}

18 Source : FileLink.java
with Apache License 2.0
from fengchen8086

/**
 * Get the referenced file name from the reference link directory path.
 *
 * @param dirPath Link references directory path
 * @return Name of the file referenced
 */
public static String getBackReferenceFileName(final Path dirPath) {
    return dirPath.getName().substring(BACK_REFERENCES_DIRECTORY_PREFIX.length());
}

18 Source : FileLink.java
with Apache License 2.0
from fengchen8086

/**
 * Checks if the specified directory path is a back reference links folder.
 *
 * @param dirPath Directory path to verify
 * @return True if the specified directory is a link references folder
 */
public static boolean isBackReferencesDir(final Path dirPath) {
    if (dirPath == null)
        return false;
    return dirPath.getName().startsWith(BACK_REFERENCES_DIRECTORY_PREFIX);
}

18 Source : PseudoDistributedFileSystem.java
with Apache License 2.0
from dremio

private Path createRemotePath(String address, Path path) {
    String basename = path.getName();
    return new Path(path.getParent(), isHidden(basename) ? String.format("%s%s@%s", basename.charAt(0), address, basename.substring(1)) : String.format("%s@%s", address, basename));
}
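
A standalone restatement of the logic above, assuming isHidden() flags basenames starting with '.' or '_' (the project's actual rule may differ):

import org.apache.hadoop.fs.Path;

public class RemotePathSketch {

    static Path createRemotePath(String address, Path path) {
        String basename = path.getName();
        // Assumed hidden-file convention; keeps the marker character in front.
        boolean hidden = basename.startsWith(".") || basename.startsWith("_");
        return new Path(path.getParent(), hidden
                ? String.format("%s%s@%s", basename.charAt(0), address, basename.substring(1))
                : String.format("%s@%s", address, basename));
    }

    public static void main(String[] args) {
        System.out.println(createRemotePath("10.0.0.1", new Path("/tmp/results.json"))); // /tmp/10.0.0.1@results.json
        System.out.println(createRemotePath("10.0.0.1", new Path("/tmp/.staging")));     // /tmp/.10.0.0.1@staging
    }
}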

18 Source : TestAppendUtils.java
with Apache License 2.0
from dkhadoop

/**
 * @return the number part of a partition
 */
private int getFilePartition(Path file) {
    String filename = file.getName();
    int pos = filename.lastIndexOf(FILEPART_SEPARATOR);
    if (pos != -1) {
        String part = filename.substring(pos + 1, pos + 1 + PARTITION_DIGITS);
        return Integer.parseInt(part);
    } else {
        return 0;
    }
}

18 Source : HDFSFile.java
with Apache License 2.0
from apache

@Override
public String getName() {
    return hdfsPath.getName();
}

18 Source : FSUtils.java
with Apache License 2.0
from apache

/**
 * Check if the file is a parquet file or a log file, then get the fileId appropriately.
 */
public static String getFileIdFromFilePath(Path filePath) {
    if (FSUtils.isLogFile(filePath)) {
        return FSUtils.getFileIdFromLogPath(filePath);
    }
    return FSUtils.getFileId(filePath.getName());
}

18 Source : FileContextLocation.java
with Apache License 2.0
from apache

@Override
public String getName() {
    return path.getName();
}

17 Source : AbstractHadoopProcessor.java
with Apache License 2.0
from wangrenlei

/**
 * Returns the relative path of the child that does not include the filename or the root path.
 *
 * @param root
 *            the path to relativize from
 * @param child
 *            the path to relativize
 * @return the relative path
 */
public static String getPathDifference(final Path root, final Path child) {
    final int depthDiff = child.depth() - root.depth();
    if (depthDiff <= 1) {
        return "".intern();
    }
    String lastRoot = root.getName();
    Path childsParent = child.getParent();
    final StringBuilder builder = new StringBuilder();
    builder.append(childsParent.getName());
    for (int i = (depthDiff - 3); i >= 0; i--) {
        childsParent = childsParent.getParent();
        String name = childsParent.getName();
        if (name.equals(lastRoot) && childsParent.toString().endsWith(root.toString())) {
            break;
        }
        builder.insert(0, Path.SEPARATOR).insert(0, name);
    }
    return builder.toString();
}
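
A hypothetical call, assuming getPathDifference() above is in scope; the filename and the root itself are excluded from the result:

Path root = new Path("/data");
Path child = new Path("/data/a/b/c.txt");
System.out.println(getPathDifference(root, child)); // a/b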

17 Source : FileSystemPanel.java
with GNU General Public License v3.0
from sdadas

private WebBreadcrumbButton createBreadcrumbButton(Path path) {
    String name = path.getName();
    if (StringUtils.isBlank(name)) {
        name = "/";
    }
    WebBreadcrumbButton button = new WebBreadcrumbButton(name);
    button.setIcon(IconFactory.getIcon("folder-small"));
    button.addActionListener(event -> {
        ViewUtils.handleErrors(this, () -> {
            this.clearFilterAndSort();
            this.model.onFileClicked(path);
        });
    });
    return button;
}

17 Source : FileSystemActions.java
with GNU General Public License v3.0
from sdadas

private void doOpenArchive(List<FileItem> selection) {
    ViewUtils.handleErrors(parent, () -> {
        Path archivePath = selection.get(0).getPath();
        String archiveName = archivePath.getName();
        if (!StringUtils.endsWith(archiveName, ".har")) {
            ViewUtils.error(parent, "Hadoop archive should have a .har extension.");
            return;
        }
        HarFsConnection connection = new HarFsConnection(parent.getConnection(), archivePath);
        MainPanel mainPanel = BeanFactory.mainPanel();
        mainPanel.openFileSystemTab(connection, StringUtils.abbreviate(archiveName, 50));
    });
}

17 Source : FileSystemHelper.java
with BSD 3-Clause "New" or "Revised" License
from osmlab

/**
 * Get a resource from a FileSystem and a Path in that FileSystem
 *
 * @param fileSystem
 *            The FileSystem with the resource
 * @param path
 *            The path to the resource on the FileSystem
 * @return An InputStream for the resource
 */
private static InputStreamResource getResource(final FileSystem fileSystem, final Path path) {
    try {
        // This doesn't actually use an AutoCloseable resource in InputStreamResource
        final InputStreamResource resource = new InputStreamResource(() -> {
            try {
                return fileSystem.open(path);
            } catch (final Exception e) {
                throw new CoreException(UNABLE_TO_OPEN, path, e);
            }
        }).withName(path.getName());
        if (path.getName().endsWith(FileSuffix.GZIP.toString())) {
            resource.setDecompressor(Decompressor.GZIP);
        }
        return resource;
    } catch (final Exception e) {
        throw new CoreException(UNABLE_TO_READ, path, e);
    }
}

17 Source : PbfFilePathFilter.java
with BSD 3-Clause "New" or "Revised" License
from osmlab

@Override
public boolean accept(final Path path) {
    return path.getName().endsWith(FileSuffix.PBF.toString());
}

17 Source : LogFilePathFilter.java
with BSD 3-Clause "New" or "Revised" License
from osmlab

@Override
public boolean accept(final Path path) {
    return path.getName().endsWith(this.extension);
}
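
PathFilter implementations like the two above plug directly into FileSystem listing calls. A minimal sketch (the directory and suffix are made up, and the default Configuration is assumed to point at the right cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FilterListing {

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // listStatus accepts a PathFilter; a lambda works because the
        // interface has a single accept(Path) method.
        FileStatus[] matches = fs.listStatus(new Path("/ingest"), p -> p.getName().endsWith(".pbf"));
        for (FileStatus status : matches) {
            System.out.println(status.getPath().getName());
        }
    }
}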

17 Source : DistCp.java
with Apache License 2.0
from NJUJYB

/**
 * Set up ssl configuration on the job configuration to enable hsftp access
 * from map job. Also copy the ssl configuration file to Distributed cache
 *
 * @param job - Reference to job's handle
 * @throws java.io.IOException - Exception if unable to locate ssl config file
 */
private void setupSSLConfig(Job job) throws IOException {
    Configuration configuration = job.getConfiguration();
    Path sslConfigPath = new Path(configuration.getResource(inputOptions.getSslConfigurationFile()).toString());
    addSSLFilesToDistCache(job, sslConfigPath);
    configuration.set(DistCpConstants.CONF_LABEL_SSL_CONF, sslConfigPath.getName());
    configuration.set(DistCpConstants.CONF_LABEL_SSL_KEYSTORE, sslConfigPath.getName());
}

17 Source : TestHadoopArchives.java
with Apache License 2.0
from NJUJYB

@Test
public void testRelativePathWitRepl() throws Exception {
    final Path sub1 = new Path(inputPath, "dir1");
    fs.mkdirs(sub1);
    createFile(inputPath, fs, sub1.getName(), "a");
    final FsShell shell = new FsShell(conf);
    final List<String> originalPaths = lsr(shell, "input");
    System.out.println("originalPaths: " + originalPaths);
    // make the archive:
    final String fullHarPathStr = makeArchiveWithRepl();
    // compare results:
    final List<String> harPaths = lsr(shell, fullHarPathStr);
    Assert.assertEquals(originalPaths, harPaths);
}

17 Source : TestHadoopArchives.java
with Apache License 2.0
from NJUJYB

@Test
public void testRelativePath() throws Exception {
    final Path sub1 = new Path(inputPath, "dir1");
    fs.mkdirs(sub1);
    createFile(inputPath, fs, sub1.getName(), "a");
    final FsShell shell = new FsShell(conf);
    final List<String> originalPaths = lsr(shell, "input");
    System.out.println("originalPaths: " + originalPaths);
    // make the archive:
    final String fullHarPathStr = makeArchive();
    // compare results:
    final List<String> harPaths = lsr(shell, fullHarPathStr);
    Assert.assertEquals(originalPaths, harPaths);
}

17 Source : TestSnapshotPathINodes.java
with Apache License 2.0
from NJUJYB

/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}
 * for snapshot file after deleting the original file.
 */
@Test(timeout = 15000)
public void testSnapshotPathINodesAfterDeletion() throws Exception {
    // Create a snapshot for the dir, and check the inodes for the path
    // pointing to a snapshot file
    hdfs.allowSnapshot(sub1);
    hdfs.createSnapshot(sub1, "s2");
    // Delete the original file /TestSnapshot/sub1/file1
    hdfs.delete(file1, false);
    final Snapshot snapshot;
    {
        // Resolve the path for the snapshot file
        // /TestSnapshot/sub1/.snapshot/s2/file1
        String snapshotPath = sub1.toString() + "/.snapshot/s2/file1";
        String[] names = INode.getPathNames(snapshotPath);
        byte[][] components = INode.getPathComponents(names);
        INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
        INode[] inodes = nodesInPath.getINodes();
        // Length of inodes should be (components.length - 1), since we will ignore
        // ".snapshot"
        assertEquals(inodes.length, components.length - 1);
        // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s2, file1}
        snapshot = getSnapshot(nodesInPath, "s2");
        assertSnapshot(nodesInPath, true, snapshot, 3);
        // Check the INode for file1 (snapshot file)
        final INode inode = inodes[inodes.length - 1];
        assertEquals(file1.getName(), inode.getLocalName());
        assertTrue(inode.asFile().isWithSnapshot());
    }
    // Check the INodes for path /TestSnapshot/sub1/file1
    String[] names = INode.getPathNames(file1.toString());
    byte[][] components = INode.getPathComponents(names);
    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    INode[] inodes = nodesInPath.getINodes();
    // The length of inodes should be equal to components.length
    assertEquals(inodes.length, components.length);
    // The number of non-null elements should be components.length - 1 since
    // file1 has been deleted
    assertEquals(nodesInPath.getNumNonNull(), components.length - 1);
    // The returned nodesInPath should be non-snapshot
    assertSnapshot(nodesInPath, false, snapshot, -1);
    // The last INode should be null, and the one before should be associated
    // with sub1
    assertNull(inodes[components.length - 1]);
    assertEquals(inodes[components.length - 2].getFullPathName(), sub1.toString());
    assertEquals(inodes[components.length - 3].getFullPathName(), dir.toString());
    hdfs.deleteSnapshot(sub1, "s2");
    hdfs.disallowSnapshot(sub1);
}
