Here are examples of the Java API org.apache.hadoop.fs.FileStatus.getPath(), taken from open-source projects. By voting up you can indicate which examples are most useful and appropriate.
466 Examples
19
Source : DotDrillFile.java
with Apache License 2.0
from zpochen
/**
 * Returns the base file name: the last path component with the
 * type-specific ending stripped off.
 *
 * @return base file name without parent directory or extension.
 */
public String getBaseName() {
    final String name = status.getPath().getName();
    final int endingStart = name.lastIndexOf(type.getEnding());
    return name.substring(0, endingStart);
}
18
Source : HDFSStorage.java
with Apache License 2.0
from pravega
/**
 * Makes the file represented by the given FileStatus read-only.
 *
 * @param file status of the file to modify; when this returns true the file
 *             now carries READONLY_PERMISSION.
 * @return true if a permission change was applied, false if the file was
 *         already read-only.
 * @throws IOException if the permission update fails.
 */
private boolean makeReadOnly(FileStatus file) throws IOException {
    boolean needsChange = !isReadOnly(file);
    if (needsChange) {
        this.fileSystem.setPermission(file.getPath(), READONLY_PERMISSION);
        log.debug("MakeReadOnly '{}'.", file.getPath());
    }
    return needsChange;
}
18
Source : HDFSStorage.java
with Apache License 2.0
from pravega
with Apache License 2.0
from pravega
/**
 * Grants read-write permission on the file represented by the given status.
 *
 * @param file status of the file to modify.
 * @return always true (the permission is set unconditionally).
 * @throws IOException if the permission update fails.
 */
private boolean makeWrite(FileStatus file) throws IOException {
    this.fileSystem.setPermission(file.getPath(), READWRITE_PERMISSION);
    // Fixed copy-paste bug: the original logged "MakeReadOnly" from makeWrite.
    log.debug("MakeWrite '{}'.", file.getPath());
    return true;
}
18
Source : HDFSStorage.java
with Apache License 2.0
from pravega
with Apache License 2.0
from pravega
/**
 * Extracts the epoch encoded in the status's path.
 *
 * @throws FileNameFormatException if the path does not follow the expected naming scheme.
 */
private long getEpoch(FileStatus status) throws FileNameFormatException {
    // All parsing is delegated to the path-based helper.
    return getEpochFromPath(status.getPath());
}
18
Source : TestSnapshotCommands.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
@After
public void tearDown() throws IOException {
    // Snapshots must be removed and snapshotting disallowed before a
    // snapshottable directory can be deleted.
    Path root = new Path("/sub1");
    if (!fs.exists(root)) {
        return;
    }
    Path snapshotDir = new Path("/sub1/.snapshot");
    if (fs.exists(snapshotDir)) {
        for (FileStatus snapshot : fs.listStatus(snapshotDir)) {
            fs.deleteSnapshot(root, snapshot.getPath().getName());
        }
        fs.disallowSnapshot(root);
    }
    fs.delete(root, true);
}
18
Source : TestViewFileSystemAtHdfsRoot.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
/**
 * Uses the HDFS root itself as the target test root. The root cannot be
 * deleted, so only its children are removed instead of the directory.
 */
@Override
void initializeTargetTestRoot() throws IOException {
    targetTestRoot = fHdfs.makeQualified(new Path("/"));
    FileStatus[] children = fHdfs.listStatus(targetTestRoot);
    for (FileStatus child : children) {
        fHdfs.delete(child.getPath(), true);
    }
}
18
Source : FileStatusTreeTraverserTest.java
with Apache License 2.0
from HotelsDotCom
with Apache License 2.0
from HotelsDotCom
@Before
public void before() {
    // The mocked status behaves as a directory rooted at /tmp.
    traverser = new FileStatusTreeTraverser(fileSystem);
    when(fileStatus.getPath()).thenReturn(new Path("/tmp"));
    when(fileStatus.isFile()).thenReturn(false);
}
18
Source : RelativePathFunction.java
with Apache License 2.0
from HotelsDotCom
with Apache License 2.0
from HotelsDotCom
/**
 * Expresses the status's path relative to the configured source root,
 * using DistCp's relative-path helper.
 */
@Override
public String apply(@Nonnull FileStatus fileStatus) {
    return DistCpUtils.getRelativePath(sourceRootPath, fileStatus.getPath());
}
18
Source : HadoopFileStatusWrapper.java
with Apache License 2.0
from dremio
with Apache License 2.0
from dremio
/**
 * Adapts the wrapped Hadoop path to Dremio's Path type via its URI form.
 */
@Override
public Path getPath() {
    return Path.of(status.getPath().toUri());
}
18
Source : KafkaConnectHdfsProvider.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Recursively collects file statuses under curPath that match the Kafka
 * Connect naming convention.
 *
 * @param curPath directory to scan
 * @param filter  path filter deciding which directories to descend into and
 *                which entries to keep
 * @return all matching file statuses in the subtree
 * @throws IOException on listing failure
 */
private ArrayList<FileStatus> listAllFileStatus(Path curPath, KafkaConnectPathFilter filter) throws IOException {
    ArrayList<FileStatus> collected = new ArrayList<>();
    for (FileStatus entry : this.fs.listStatus(curPath)) {
        // Only directories accepted by acceptDir are descended into; every
        // other entry (including rejected directories) is tested with accept().
        boolean descend = entry.isDirectory() && filter.acceptDir(entry.getPath());
        if (descend) {
            collected.addAll(listAllFileStatus(entry.getPath(), filter));
        } else if (filter.accept(entry.getPath())) {
            collected.add(entry);
        }
    }
    return collected;
}
18
Source : TestHoodieParquetInputFormat.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Asserts that exactly {@code expected} of the given files belong to
 * {@code commit}, matching on the commit timestamp encoded in each file name.
 * (Restores the scraped-out "assertEquals" identifier.)
 *
 * @param msg      assertion message on failure
 * @param files    candidate file statuses
 * @param commit   commit timestamp to match
 * @param expected expected number of matching files
 */
public static void ensureFilesInCommit(String msg, FileStatus[] files, String commit, int expected) {
    int count = 0;
    for (FileStatus file : files) {
        String commitTs = FSUtils.getCommitTime(file.getPath().getName());
        if (commit.equals(commitTs)) {
            count++;
        }
    }
    assertEquals(expected, count, msg);
}
18
Source : FilePathUtils.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * A file is hidden when its name starts with "_" or when it starts with "."
 * and does not contain the ".log." marker (log files are not hidden).
 */
private static boolean isHiddenFile(FileStatus fileStatus) {
    String name = fileStatus.getPath().getName();
    // Parentheses make the original operator precedence explicit:
    // "_"-prefixed names are always hidden; "."-prefixed names are hidden
    // only when they are not log files.
    return name.startsWith("_") || (name.startsWith(".") && !name.contains(".log."));
}
18
Source : CopyOnWriteInputFormat.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * A simple hook to filter files and directories from the input; may be
 * overridden. Mirrors Hadoop FileInputFormat's default filtering: names
 * starting with "_" or "." are rejected, then the local files filter is
 * consulted.
 *
 * @param fileStatus the file status to check.
 * @return true if the given file or directory is accepted.
 */
public boolean acceptFile(FileStatus fileStatus) {
    final String name = fileStatus.getPath().getName();
    if (name.startsWith("_") || name.startsWith(".")) {
        return false;
    }
    return !localFilesFilter.filterPath(new Path(fileStatus.getPath().toUri()));
}
17
Source : DFSUtil.java
with Apache License 2.0
from wgzhao
with Apache License 2.0
from wgzhao
/**
 * Registers the given entry as a source file unless it is empty.
 * Non-file entries are ignored; zero-length files are skipped with a warning.
 */
private void addSourceFileIfNotEmpty(FileStatus f) {
    if (!f.isFile()) {
        return;
    }
    String filePath = f.getPath().toString();
    if (f.getLen() > 0) {
        addSourceFileByType(filePath);
    } else {
        LOG.warn("文件[{}]长度为0,将会跳过不作处理!", filePath);
    }
}
17
Source : SyncPartitionMetadataProcedure.java
with Apache License 2.0
from trinodb
with Apache License 2.0
from trinodb
/**
 * Tests whether {@code file} is a directory whose name follows the Hive
 * partition layout {@code <column>=<value>}. (Restores the original method
 * name "isValidPartitionPath", mangled to "isValidParreplacedionPath" by a
 * text filter in the scraped source.)
 *
 * @param file          candidate directory status
 * @param column        partition column whose name must prefix the path
 * @param caseSensitive when false, the path name is lower-cased (ENGLISH)
 *                      before comparison
 */
private static boolean isValidPartitionPath(FileStatus file, Column column, boolean caseSensitive) {
    String path = file.getPath().getName();
    if (!caseSensitive) {
        path = path.toLowerCase(ENGLISH);
    }
    String prefix = column.getName() + '=';
    return file.isDirectory() && path.startsWith(prefix);
}
17
Source : DiagnosticsEntryPoint.java
with Apache License 2.0
from steveloughran
with Apache License 2.0
from steveloughran
/**
 * Renders a FileStatus for display: files get a tab-separated length
 * suffix, directories get a trailing "/".
 */
protected String statusToString(FileStatus status) {
    String suffix;
    if (status.isFile()) {
        suffix = "\t[" + status.getLen() + "]";
    } else {
        // NOTE(review): the original branched on path.endsWith("/") but both
        // branches produced "/" — dead conditional collapsed. If the intent
        // was to suppress the slash for already-slash-terminated paths,
        // confirm against upstream and restore that distinction.
        suffix = "/";
    }
    return String.format("%s%s", status.getPath(), suffix);
}
17
Source : FileSystemTableModel.java
with GNU General Public License v3.0
from sdadas
with GNU General Public License v3.0
from sdadas
/**
 * Repoints the model at {@code parent}: lists its children, resets the
 * cached rows, prepends a ".." entry when a parent directory exists, then
 * fetches the first page and notifies listeners of the path change.
 */
private void loadRows(FileStatus parent) throws FsException {
    // List first so a listing failure leaves the current state untouched.
    FsIterator listing = this.connection.list(parent.getPath());
    this.path = parent;
    this.iterator = listing;
    this.children.clear();
    boolean hasParentDir = this.path.getPath().getParent() != null;
    if (hasParentDir) {
        this.children.add(FileItem.parent());
    }
    this.hasMoreRows = true;
    fetchRows(100, true);
    firePathChanged(this.path.getPath());
}
17
Source : TestSwiftFileSystemPartitionedUploads.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
/**
 * Finds the listing entry whose path equals {@code childPath}.
 *
 * @return the LAST matching entry (mirroring the original scan order), or
 *         null when no entry matches.
 */
private FileStatus resolveChild(FileStatus[] parentDirListing, Path childPath) {
    FileStatus match = null;
    for (FileStatus candidate : parentDirListing) {
        if (candidate.getPath().equals(childPath)) {
            match = candidate;
        }
    }
    return match;
}
17
Source : ViewFileSystem.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
/**
 * Rewrites a target-filesystem path into its chrooted viewfs form: strips
 * the chroot prefix, then requalifies either the original mount path (empty
 * suffix) or the resolved path plus suffix.
 */
private Path getChrootedPath(InodeTree.ResolveResult<FileSystem> res, FileStatus status, Path f) throws IOException {
    ChRootedFileSystem target = (ChRootedFileSystem) res.targetFileSystem;
    final String suffix = target.stripOutRoot(status.getPath());
    Path chrooted = suffix.isEmpty() ? f : new Path(res.resolvedPath, suffix);
    return this.makeQualified(chrooted);
}
17
Source : RelativePathFunctionTest.java
with Apache License 2.0
from HotelsDotCom
with Apache License 2.0
from HotelsDotCom
// Verifies the relative-path computation for a nested path under the source
// root. (Restores the scraped-out "assertThat" identifier.)
@Test
public void typical() {
    Path sourceRootPath = new Path("/root/");
    Path path = new Path("/root/foo/bar/");
    when(fileStatus.getPath()).thenReturn(path);
    String relativePath = new RelativePathFunction(sourceRootPath).apply(fileStatus);
    assertThat(relativePath, is("/foo/bar"));
}
17
Source : HFileArchiveTestingUtil.java
with Apache License 2.0
from fengchen8086
with Apache License 2.0
from fengchen8086
/**
 * Maps each file status to its bare file name (last path component).
 */
private static List<String> convertToString(List<FileStatus> files) {
    List<String> names = new ArrayList<String>(files.size());
    for (FileStatus status : files) {
        names.add(status.getPath().getName());
    }
    return names;
}
17
Source : TestCatalogJanitor.java
with Apache License 2.0
from fengchen8086
with Apache License 2.0
from fengchen8086
/**
 * Debug-logs the path of each file in the given set.
 *
 * @param description label for the file group in the log output
 * @param storeFiles  statuses whose paths are logged
 */
private void logFiles(String description, FileStatus[] storeFiles) {
    LOG.debug("Current " + description + ": ");
    for (FileStatus storeFile : storeFiles) {
        LOG.debug(storeFile.getPath());
    }
}
17
Source : PseudoDistributedFileSystem.java
with Apache License 2.0
from dremio
with Apache License 2.0
from dremio
/**
 * Records newStatus under its file name and reports whether that name was
 * already present (i.e. the same entry was reported by another endpoint).
 *
 * @param newStatus   the new status for a file; always stored in the map.
 * @param oldStatuses map of file names to previously seen statuses.
 * @return true if the name had already appeared, false otherwise.
 * @throws IllegalStateException when the duplicate mixes a file with a
 *         directory, or duplicates a plain file (neither should happen).
 */
private static boolean checkDuplicateFileStatus(FileStatus newStatus, Map<String, FileStatus> oldStatuses) {
    final FileStatus displaced = oldStatuses.put(newStatus.getPath().getName(), newStatus);
    if (displaced == null) {
        return false;
    }
    // Merge conflict: validate that the collision is a legal directory merge.
    if (displaced.isDirectory() != newStatus.isDirectory()) {
        throw new IllegalStateException("Attempting to merge a file and a directory");
    }
    if (displaced.isFile()) {
        throw new IllegalStateException("Attempting to merge two files for the same remote endpoint");
    }
    // TODO: DX-11234 Identify the correct behavior when multiple nodes have the same directory.
    return true;
}
17
Source : DefaultRemoteDirectoryManager.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Returns the (created-if-missing) root folder for the given job under the
 * user's root, resolved to an absolute path.
 */
private Path getJobRootFolder(String jobName) throws IOException {
    Path jobRoot = new Path(getUserRootFolder(), jobName);
    createFolderIfNotExist(jobRoot);
    // Round-trip through getFileStatus so the returned path is absolute.
    FileStatus status = fs.getFileStatus(jobRoot);
    return status.getPath();
}
17
Source : CrailHadoopFileSystem.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Delegates to the path-based overload, unwrapping the status's path.
 */
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException {
    return this.getFileBlockLocations(file.getPath(), start, len);
}
17
Source : HBCKAbstractFileStatusFilter.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Delegates to the (path, isDirectory) overload of accept.
 */
@Override
public boolean accept(FileStatus f) {
    return accept(f.getPath(), f.isDirectory());
}
17
Source : HdfsResourceLoader.java
with Apache License 2.0
from ambiverse-nlu
with Apache License 2.0
from ambiverse-nlu
/**
 * Recursively collects resources under rootDir whose location (relative to
 * rootDir) matches subPattern. Cleans up decompiler artifacts present in the
 * original (var6/var7/var8 index loop, redundant null assignment); the
 * control flow is otherwise unchanged.
 *
 * @param rootDir    directory (or single file) to scan
 * @param subPattern pattern applied to locations relative to rootDir
 * @param results    sink receiving each matching resource
 * @throws IOException on file system access failure
 */
private void doRetrieveMatchingResources(Path rootDir, String subPattern, Set<Resource> results) throws IOException {
    if (this.fs.isFile(rootDir)) {
        // Leaf case: match the prefix-stripped file path against the pattern.
        if (this.pathMatcher.match(subPattern, stripPrefix(rootDir.toUri().getPath()))) {
            results.add(new HdfsResource(rootDir, this.fs));
        }
        return;
    }
    FileStatus[] statuses = this.fs.listStatus(rootDir);
    if (ObjectUtils.isEmpty(statuses)) {
        return;
    }
    String root = rootDir.toUri().getPath();
    for (FileStatus fileStatus : statuses) {
        Path p = fileStatus.getPath();
        String location = p.toUri().getPath();
        if (location.startsWith(root)) {
            // Make the location relative to the scan root.
            location = location.substring(root.length());
        }
        if (fileStatus.isDir() && this.pathMatcher.matchStart(subPattern, location)) {
            this.doRetrieveMatchingResources(p, subPattern, results);
        } else if (this.pathMatcher.match(subPattern.substring(1), location)) {
            results.add(new HdfsResource(p, this.fs));
        }
    }
}
16
Source : FileSystemUtil.java
with Apache License 2.0
from zpochen
with Apache License 2.0
from zpochen
/**
 * Appends to {@code statuses} every directory and file under {@code path}
 * that passes the custom filter. With {@code recursive} set, nested
 * directories are walked as well.
 *
 * @param fs        current file system
 * @param path      path to file or directory
 * @param recursive true to include nested directories and their files
 * @param statuses  accumulator receiving matching statuses
 * @param filter    custom filter applied by listStatus
 * @return the same accumulator, for chaining
 */
private static List<FileStatus> listAll(FileSystem fs, Path path, boolean recursive, List<FileStatus> statuses, PathFilter filter) throws IOException {
    for (FileStatus child : fs.listStatus(path, filter)) {
        statuses.add(child);
        boolean descend = recursive && child.isDirectory();
        if (descend) {
            listAll(fs, child.getPath(), true, statuses, filter);
        }
    }
    return statuses;
}
16
Source : BlockMapBuilder.java
with Apache License 2.0
from zpochen
with Apache License 2.0
from zpochen
/**
 * Returns the cached block map for the status's path, building (without
 * caching here) when absent.
 */
private ImmutableRangeMap<Long, BlockLocation> getBlockMap(FileStatus status) throws IOException {
    ImmutableRangeMap<Long, BlockLocation> cached = blockMapMap.get(status.getPath());
    return cached != null ? cached : buildBlockMap(status);
}
16
Source : FileSelection.java
with Apache License 2.0
from zpochen
with Apache License 2.0
from zpochen
/**
 * Lazily builds and caches the string form of each status path.
 */
public List<String> getFiles() {
    if (files == null) {
        final List<String> collected = Lists.newArrayList();
        for (final FileStatus status : statuses) {
            collected.add(status.getPath().toString());
        }
        files = collected;
    }
    return files;
}
16
Source : FileSelection.java
with Apache License 2.0
from zpochen
with Apache License 2.0
from zpochen
/**
 * Computes the common ancestor path of the given statuses; empty string for
 * a null or empty input.
 */
private static String commonPath(final List<FileStatus> statuses) {
    if (statuses == null || statuses.isEmpty()) {
        return "";
    }
    final List<String> paths = Lists.newArrayList();
    for (final FileStatus status : statuses) {
        paths.add(status.getPath().toString());
    }
    return commonPathForFiles(paths);
}
16
Source : BasicFormatMatcher.java
with Apache License 2.0
from zpochen
with Apache License 2.0
from zpochen
/*
 * Returns true when the file name (or, for compressed files, the name with
 * its codec extension stripped) matches one of the configured patterns, or
 * when the fallback matcher accepts the file.
 */
@Override
public boolean isFileReadable(DrillFileSystem fs, FileStatus status) throws IOException {
    final String fileName = status.getPath().toString();
    // For a recognized compression codec, also test the name without its
    // trailing ".<ext>" so patterns written for uncompressed names match.
    String uncompressedName = null;
    if (compressible && codecFactory.getCodec(status.getPath()) != null) {
        uncompressedName = fileName.substring(0, fileName.lastIndexOf('.'));
    }
    for (Pattern pattern : patterns) {
        if (pattern.matcher(fileName).matches()) {
            return true;
        }
        if (uncompressedName != null && pattern.matcher(uncompressedName).matches()) {
            return true;
        }
    }
    return matcher.matches(fs, status);
}
16
Source : DotDrillFile.java
with Apache License 2.0
from zpochen
with Apache License 2.0
from zpochen
/**
 * Parses this .view.drill file into a View. Only valid for VIEW-typed dot
 * files. (Restores "View.class", mangled to "View.clreplaced" in the
 * scraped source.)
 *
 * @throws IOException if the file cannot be opened or parsed.
 */
public View getView(LogicalPlanPersistence lpPersistence) throws IOException {
    Preconditions.checkArgument(type == DotDrillType.VIEW);
    // try-with-resources guarantees the stream is closed after parsing.
    try (InputStream is = fs.open(status.getPath())) {
        return lpPersistence.getMapper().readValue(is, View.class);
    }
}
16
Source : DFSUtil.java
with Apache License 2.0
from wgzhao
with Apache License 2.0
from wgzhao
/**
 * Walks the given root path without glob expansion: directories are
 * recursed into, regular files are registered (if non-empty), and anything
 * else is skipped with a log message.
 */
private void getHDFSAllFilesNORegex(String path, FileSystem hdfs) throws IOException {
    // Root of the scan. (Per the original notes, listStatus reportedly
    // retries 45 times at 20-second intervals on network loss — unverified.)
    Path listFiles = new Path(path);
    for (FileStatus f : hdfs.listStatus(listFiles)) {
        if (f.isDirectory()) {
            LOG.info("[{}] 是目录, 递归获取该目录下的文件", f.getPath());
            getHDFSAllFilesNORegex(f.getPath().toString(), hdfs);
        } else if (f.isFile()) {
            addSourceFileIfNotEmpty(f);
        } else {
            // Neither a directory nor a regular file: ignore it.
            String message = String.format("该路径[%s]文件类型既不是目录也不是文件,插件自动忽略。", f.getPath());
            LOG.info(message);
        }
    }
}
16
Source : DFSUtil.java
with Apache License 2.0
from wgzhao
with Apache License 2.0
from wgzhao
/**
 * Collects all source files under hdfsPath. Glob patterns ("*" or "?") are
 * expanded first; plain paths are walked directly. IO failures are wrapped
 * in a DataX path-configuration error.
 */
public void getHDFSAllFiles(String hdfsPath) {
    try {
        FileSystem hdfs = FileSystem.get(hadoopConf);
        boolean isGlob = hdfsPath.contains("*") || hdfsPath.contains("?");
        if (!isGlob) {
            getHDFSAllFilesNORegex(hdfsPath, hdfs);
        } else {
            // Expand the glob, register plain files, recurse into directories.
            for (FileStatus f : hdfs.globStatus(new Path(hdfsPath))) {
                if (f.isFile()) {
                    addSourceFileIfNotEmpty(f);
                } else if (f.isDirectory()) {
                    getHDFSAllFilesNORegex(f.getPath().toString(), hdfs);
                }
            }
        }
    } catch (IOException e) {
        String message = String.format("无法读取路径[%s]下的所有文件,请确认您的配置项fs.defaultFS, path的值是否正确," + "是否有读写权限,网络是否已断开!", hdfsPath);
        LOG.error(message);
        throw DataXException.asDataXException(HdfsReaderErrorCode.PATH_CONFIG_ERROR, e);
    }
}
16
Source : Utilities.java
with Apache License 2.0
from Qihoo360
with Apache License 2.0
from Qihoo360
/**
 * Extracts the path from each status; a null input yields an empty list.
 */
public static List<Path> convertStatusToPath(List<FileStatus> fileStatuses) {
    List<Path> paths = new ArrayList<>();
    if (fileStatuses == null) {
        return paths;
    }
    for (FileStatus status : fileStatuses) {
        paths.add(status.getPath());
    }
    return paths;
}
16
Source : MockFileSystem.java
with Apache License 2.0
from pravega
with Apache License 2.0
from pravega
/**
 * Returns the status for f: the dedicated root status when f is the root,
 * otherwise the status stored with the file's data entry.
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
    return f.equals(root.getPath()) ? root : getFileData(f).getStatus();
}
16
Source : TestSwiftFileSystemLsOperations.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
// Verifies that a filtered listing of "/" includes a freshly created child.
// (Restores the scraped-out "assertTrue" identifier.)
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListStatusFiltered() throws Throwable {
    Path dir = path("/");
    Path child = path("/test");
    touch(fs, child);
    FileStatus[] stats = fs.listStatus(dir, new AcceptAllFilter());
    boolean found = false;
    StringBuilder builder = new StringBuilder();
    for (FileStatus stat : stats) {
        builder.append(stat.toString()).append('\n');
        if (stat.getPath().equals(child)) {
            found = true;
        }
    }
    assertTrue("Path " + child + " not found in directory " + dir + ":" + builder, found);
}
16
Source : TestSwiftFileSystemLsOperations.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
// Verifies that listing "/" after creating one file yields exactly that
// file. (Restores the scraped-out "assertEquals" identifiers.)
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListNonEmptyRoot() throws Throwable {
    Path test = path("/test");
    touch(fs, test);
    FileStatus[] fileStatuses = fs.listStatus(path("/"));
    String stats = dumpStats("/", fileStatuses);
    assertEquals("Wrong #of root children" + stats, 1, fileStatuses.length);
    FileStatus status = fileStatuses[0];
    assertEquals("Wrong path value" + stats, test, status.getPath());
}
16
Source : SimpleCopyListing.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
/**
 * Lists the immediate children of the parent's path (no filtering).
 */
private static FileStatus[] getChildren(FileSystem fileSystem, FileStatus parent) throws IOException {
    return fileSystem.listStatus(parent.getPath());
}
16
Source : HistoryServerFileSystemStateStoreService.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
/**
 * Loads delegation tokens from every bucket directory under the token state
 * path into {@code state}. Key directories are skipped (loaded elsewhere);
 * any other entry is logged and ignored.
 *
 * @return the number of tokens loaded.
 */
private int loadTokens(HistoryServerState state) throws IOException {
    int numTokens = 0;
    for (FileStatus stat : fs.listStatus(tokenStatePath)) {
        String name = stat.getPath().getName();
        if (name.startsWith(TOKEN_BUCKET_DIR_PREFIX)) {
            numTokens += loadTokensFromBucket(state, stat.getPath());
        } else if (!name.equals(TOKEN_KEYS_DIR_NAME)) {
            // TOKEN_KEYS_DIR_NAME entries are handled by the key loader.
            LOG.warn("Skipping unexpected file in history server token state: " + stat.getPath());
        }
    }
    return numTokens;
}
16
Source : HistoryFileManager.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
/**
 * Recursively deletes the serial directory, qualified against doneDirFc.
 */
protected boolean deleteDir(FileStatus serialDir) throws AccessControlException, FileNotFoundException, UnsupportedFileSystemException, IOException {
    return doneDirFc.delete(doneDirFc.makeQualified(serialDir.getPath()), true);
}
16
Source : TestReservedRawPaths.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
// Walks /.reserved/raw down the dir0/dir1/dir2 chain, checking each listed
// entry carries the raw prefix and that exactly three levels are traversed.
// (Restores the scraped-out "assertMatches"/"assertEquals" identifiers.)
@Test(timeout = 120000)
public void testListRecursive() throws Exception {
    Path rootPath = new Path("/");
    Path p = rootPath;
    for (int i = 0; i < 3; i++) {
        p = new Path(p, "dir" + i);
        fs.mkdirs(p);
    }
    Path curPath = new Path("/.reserved/raw");
    int cnt = 0;
    FileStatus[] fileStatuses = fs.listStatus(curPath);
    while (fileStatuses != null && fileStatuses.length > 0) {
        FileStatus f = fileStatuses[0];
        assertMatches(f.getPath().toString(), "/.reserved/raw");
        curPath = Path.getPathWithoutSchemeAndAuthority(f.getPath());
        cnt++;
        fileStatuses = fs.listStatus(curPath);
    }
    assertEquals(3, cnt);
}
16
Source : ViewFileSystem.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
private static FileStatus fixFileStatus(FileStatus orig, Path qualified) throws IOException {
    // FileStatus#getPath is fully qualified relative to the root of the
    // TARGET file system; callers need it rewritten as a viewfs URI relative
    // to the root of the mount table.
    // RawLocalFileSystem implements FileStatus#getOwner lazily: the returned
    // status keeps a reference to the file system and expects
    // FileStatus#getPath to stay unchanged so it can fetch the owner later.
    // For "file"-scheme statuses we therefore interpose a wrapper
    // (ViewFileSystemFileStatus) that works around this, instead of mutating
    // the local status directly.
    if ("file".equals(orig.getPath().toUri().getScheme())) {
        orig = wrapLocalFileStatus(orig, qualified);
    }
    orig.setPath(qualified);
    return orig;
}
16
Source : ViewFileSystem.java
with Apache License 2.0
from NJUJYB
with Apache License 2.0
from NJUJYB
/**
 * Resolves the viewfs path to its target file system, then delegates with a
 * status re-rooted at the remaining (target-relative) path.
 */
@Override
public BlockLocation[] getFileBlockLocations(FileStatus fs, long start, long len) throws IOException {
    final InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(fs.getPath()), true);
    ViewFsFileStatus adjusted = new ViewFsFileStatus(fs, res.remainingPath);
    return res.targetFileSystem.getFileBlockLocations(adjusted, start, len);
}
16
Source : FileSystemRMStateStore.java
with Apache License 2.0
from naver
with Apache License 2.0
from naver
private void checkAndResumeUpdateOperation(Path path) throws Exception {
// Before loading the state information, check whether .new file exists.
// If it does, the prior updateFile is failed on half way. We need to
// complete replacing the old file first.
FileStatus[] newChildNodes = listStatusWithRetries(path, new PathFilter() {
@Override
public boolean accept(Path path) {
return path.getName().endsWith(".new");
}
});
for (FileStatus newChildNodeStatus : newChildNodes) {
replacedert newChildNodeStatus.isFile();
String newChildNodeName = newChildNodeStatus.getPath().getName();
String childNodeName = newChildNodeName.substring(0, newChildNodeName.length() - ".new".length());
Path childNodePath = new Path(newChildNodeStatus.getPath().getParent(), childNodeName);
replaceFile(newChildNodeStatus.getPath(), childNodePath);
}
}
16
Source : FileSelection.java
with Apache License 2.0
from lealone
with Apache License 2.0
from lealone
/**
 * Lazily materializes and caches the list of paths for all statuses.
 */
public List<Path> getFiles() {
    if (files == null) {
        List<Path> collected = Lists.newArrayList();
        for (FileStatus status : statuses) {
            collected.add(status.getPath());
        }
        files = collected;
    }
    return files;
}
16
Source : AbstractHadoopJob.java
with Apache License 2.0
from Kyligence
with Apache License 2.0
from Kyligence
/**
 * Recursively appends the files under tmpDir to the appropriate list:
 * ".jar" entries to jarList, everything else to fileList, comma-separated.
 * IO failures are rethrown unchecked.
 */
private void appendTmpDir(Job job, FileSystem fs, Path tmpDir, StringBuilder jarList, StringBuilder fileList) {
    try {
        for (FileStatus entry : fs.listStatus(tmpDir)) {
            Path entryPath = entry.getPath();
            if (fs.getFileStatus(entryPath).isDirectory()) {
                appendTmpDir(job, fs, entryPath, jarList, fileList);
                continue;
            }
            StringBuilder target = entryPath.getName().endsWith(".jar") ? jarList : fileList;
            if (target.length() > 0) {
                target.append(",");
            }
            target.append(fs.getFileStatus(entryPath).getPath().toString());
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
16
Source : AppendTrieDictionaryChecker.java
with Apache License 2.0
from Kyligence
with Apache License 2.0
from Kyligence
/**
 * Recursively gathers dictionary slice files: descends into directories and
 * collects files whose name carries the V1 slice prefix.
 */
public void listDictSlicePath(FileSystem fs, FileStatus path, List<Path> list) throws IOException {
    if (!path.isDirectory()) {
        if (path.getPath().getName().startsWith(GlobalDictHDFSStore.IndexFormatV1.SLICE_PREFIX)) {
            list.add(path.getPath());
        }
        return;
    }
    for (FileStatus child : fs.listStatus(path.getPath())) {
        listDictSlicePath(fs, child, list);
    }
}
16
Source : TestBoundedRegionGroupingProvider.java
with Apache License 2.0
from fengchen8086
with Apache License 2.0
from fengchen8086
@Before
public void setUp() throws Exception {
    // Start every test from an empty file system root.
    for (FileStatus entry : fs.listStatus(new Path("/"))) {
        fs.delete(entry.getPath(), true);
    }
}
See More Examples