Here are examples of the Java API org.apache.hadoop.hdfs.protocol.HdfsFileStatus, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
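Most of the snippets below follow the same pattern: look the path up with DFSClient.getFileInfo, null-check the result (getFileInfo returns null when the path does not exist), then read attributes from the returned HdfsFileStatus. As a quick orientation, here is a minimal sketch of that pattern, assuming an already-constructed DFSClient; the class name, method name, and printed fields are illustrative assumptions, not taken from any project below.

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// A minimal sketch, not a definitive implementation: fetch the status of a
// path and print a few common attributes. Class and method names here are
// hypothetical.
public class HdfsFileStatusSketch {
    static void printStatus(DFSClient dfsClient, String path) throws IOException {
        HdfsFileStatus status = dfsClient.getFileInfo(path);
        if (status == null) {
            // getFileInfo returns null for a non-existent path, hence the
            // null checks that recur throughout the examples below.
            System.out.println(path + " does not exist");
            return;
        }
        System.out.println("isDir = " + status.isDir());
        System.out.println("length = " + status.getLen());
        System.out.println("replication = " + status.getReplication());
        System.out.println("owner = " + status.getOwner() + ":" + status.getGroup());
        System.out.println("modification time = " + status.getModificationTime());
    }
}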
112 Examples
19
Source : TestJsonUtil.java
with Apache License 2.0
from NJUJYB
static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
return new FileStatus(f.getLen(), f.isDir(), f.getReplication(), f.getBlockSize(), f.getModificationTime(), f.getAccessTime(), f.getPermission(), f.getOwner(), f.getGroup(), f.isSymlink() ? new Path(f.getSymlink()) : null, new Path(f.getFullName(parent)));
}
19
Source : NamenodeFsck.java
with Apache License 2.0
from NJUJYB
boolean hdfsPathExists(String path) throws AccessControlException, UnresolvedLinkException, IOException {
try {
HdfsFileStatus hfs = namenode.getRpcServer().getFileInfo(path);
return (hfs != null);
} catch (FileNotFoundException e) {
return false;
}
}
19
Source : UnErasureCodingAction.java
with Apache License 2.0
from Intel-bigdata
@Override
protected void execute() throws Exception {
final String MATCH_RESULT = "The current EC policy is replication already.";
final String DIR_RESULT = "The replication EC policy is set successfully for the given directory.";
final String CONVERT_RESULT = "The file is converted successfully with replication EC policy.";
this.setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
HdfsFileStatus fileStatus = dfsClient.getFileInfo(srcPath);
if (fileStatus == null) {
throw new ActionException("File doesn't exist!");
}
ErasureCodingPolicy srcEcPolicy = fileStatus.getErasureCodingPolicy();
// if ecPolicy is null, it means replication.
if (srcEcPolicy == null) {
this.progress = 1.0F;
appendLog(MATCH_RESULT);
return;
}
if (fileStatus.isDir()) {
dfsClient.setErasureCodingPolicy(srcPath, ecPolicyName);
progress = 1.0F;
appendLog(DIR_RESULT);
return;
}
try {
convert(fileStatus);
setAttributes(srcPath, fileStatus, ecTmpPath);
dfsClient.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE);
appendLog(CONVERT_RESULT);
appendLog(String.format("The previous EC policy is %s.", srcEcPolicy.getName()));
appendLog(String.format("The current EC policy is %s.", REPLICATION_POLICY_NAME));
} catch (ActionException ex) {
try {
if (dfsClient.getFileInfo(ecTmpPath) != null) {
dfsClient.delete(ecTmpPath, false);
}
} catch (IOException e) {
LOG.error("Failed to delete tmp file created during the conversion!");
}
throw new ActionException(ex);
}
}
19
Source : TestWriteFileAction.java
with Apache License 2.0
from Intel-bigdata
@Test
public void testExecute() throws Exception {
String filePath = "/testWriteFile/fadsfa/213";
long size = 10000;
writeFile(filePath, size);
HdfsFileStatus fileStatus = dfs.getClient().getFileInfo(filePath);
Assert.assertTrue(fileStatus.getLen() == size);
}
19
Source : MoveFileAction.java
with Apache License 2.0
from Intel-bigdata
private boolean recheckModification() {
try {
HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
if (fileStatus == null) {
return true;
}
boolean closed = dfsClient.isFileClosed(fileName);
if (!closed || (movePlan.getFileId() != 0 && fileStatus.getFileId() != movePlan.getFileId()) || fileStatus.getLen() != movePlan.getFileLength() || fileStatus.getModificationTime() != movePlan.getModificationTime()) {
return true;
}
return false;
} catch (Exception e) {
// check again for this case
return true;
}
}
19
Source : CheckStorageAction.java
with Apache License 2.0
from Intel-bigdata
@Override
protected void execute() throws Exception {
if (fileName == null) {
throw new IllegalArgumentException("File parameter is missing! ");
}
HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
if (fileStatus == null) {
throw new ActionException("File does not exist.");
}
if (fileStatus.isDir()) {
appendResult("This is a directory which has no storage result!");
// Append to log for the convenience of UI implementation
appendLog("This is a directory which has no storage result!");
return;
}
long length = fileStatus.getLen();
List<LocatedBlock> locatedBlocks = dfsClient.getLocatedBlocks(fileName, 0, length).getLocatedBlocks();
if (locatedBlocks.size() == 0) {
appendResult("File '" + fileName + "' has no blocks.");
appendLog("File '" + fileName + "' has no blocks.");
return;
}
for (LocatedBlock locatedBlock : locatedBlocks) {
StringBuilder blockInfo = new StringBuilder();
blockInfo.append("File offset = ").append(locatedBlock.getStartOffset()).append(", ");
blockInfo.append("Block locations = {");
for (DatanodeInfo datanodeInfo : locatedBlock.getLocations()) {
blockInfo.append(datanodeInfo.getName());
if (datanodeInfo instanceof DatanodeInfoWithStorage) {
blockInfo.append("[").append(((DatanodeInfoWithStorage) datanodeInfo).getStorageType()).append("]");
}
blockInfo.append(" ");
}
blockInfo.append("}");
appendResult(blockInfo.toString());
appendLog(blockInfo.toString());
}
}
19
Source : CacheFileAction.java
with Apache License 2.0
from Intel-bigdata
@Override
protected void execute() throws Exception {
if (fileName == null) {
throw new IllegalArgumentException("File parameter is missing! ");
}
// set cache replication as the replication number of the file if not set
if (replication == 0) {
HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
replication = fileStatus.isDir() ? 1 : fileStatus.getReplication();
}
addActionEvent(fileName);
executeCacheAction(fileName);
}
18
Source : Nfs3Utils.java
with Apache License 2.0
from NJUJYB
public static WccAttr getWccAttr(DFSClient client, String fileIdPath) throws IOException {
HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
if (fstat == null) {
return null;
}
long size = fstat.isDir() ? Nfs3FileAttributes.getDirSize(fstat.getChildrenNum()) : fstat.getLen();
return new WccAttr(size, new NfsTime(fstat.getModificationTime()), new NfsTime(fstat.getModificationTime()));
}
18
Source : FileDataServlet.java
with Apache License 2.0
from NJUJYB
/**
* Select a datanode to service this request.
* Currently, this looks at no more than the first five blocks of a file,
* selecting a datanode randomly from the most represented.
* @param conf
*/
private DatanodeID pickSrcDatanode(LocatedBlocks blks, HdfsFileStatus i, Configuration conf) throws IOException {
if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
// pick a random datanode
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(getServletContext());
return NamenodeJspHelper.getRandomDatanode(nn);
}
return JspHelper.bestNode(blks, conf);
}
18
Source : Nfs3Utils.java
with Apache License 2.0
from naver
public static WccAttr getWccAttr(DFSClient client, String fileIdPath) throws IOException {
HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
if (fstat == null) {
return null;
}
long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat.getLen();
return new WccAttr(size, new NfsTime(fstat.getModificationTime()), new NfsTime(fstat.getModificationTime()));
}
18
Source : FSDirStatAndListingOp.java
with Apache License 2.0
from naver
/**
* Get a partial listing of the indicated directory
*
* We will stop when any of the following conditions is met:
* 1) this.lsLimit files have been added
* 2) needLocation is true AND enough files have been added such
* that at least this.lsLimit block locations are in the response
*
* @param fsd FSDirectory
* @param iip the INodesInPath instance containing all the INodes along the
* path
* @param src the directory name
* @param startAfter the name to start listing after
* @param needLocation if block locations are returned
* @return a partial listing starting after startAfter
*/
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip, String src, byte[] startAfter, boolean needLocation, boolean isSuperUser) throws IOException {
String srcs = FSDirectory.normalizePath(src);
final boolean isRawPath = FSDirectory.isReservedRawName(src);
fsd.readLock();
try {
if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
return getSnapshotsListing(fsd, srcs, startAfter);
}
final int snapshot = iip.getPathSnapshotId();
final INode targetNode = iip.getLastINode();
if (targetNode == null)
return null;
byte parentStoragePolicy = isSuperUser ? targetNode.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
if (!targetNode.isDirectory()) {
return new DirectoryListing(new HdfsFileStatus[] { createFileStatus(fsd, src, HdfsFileStatus.EMPTY_NAME, targetNode, needLocation, parentStoragePolicy, snapshot, isRawPath, iip) }, 0);
}
final INodeDirectory dirInode = targetNode.asDirectory();
final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
int startChild = INodeDirectory.nextChild(contents, startAfter);
int totalNumChildren = contents.size();
int numOfListing = Math.min(totalNumChildren - startChild, fsd.getLsLimit());
int locationBudget = fsd.getLsLimit();
int listingCnt = 0;
HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
INode cur = contents.get(startChild + i);
byte curPolicy = isSuperUser && !cur.isSymlink() ? cur.getLocalStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur, needLocation, getStoragePolicyID(curPolicy, parentStoragePolicy), snapshot, isRawPath, iip);
listingCnt++;
if (needLocation) {
// Once we hit lsLimit locations, stop.
// This helps to prevent excessively large response payloads.
// Approximate #locations with locatedBlockCount() * repl_factor
LocatedBlocks blks = ((HdfsLocatedFileStatus) listing[i]).getBlockLocations();
locationBudget -= (blks == null) ? 0 : blks.locatedBlockCount() * listing[i].getReplication();
}
}
// truncate return array if necessary
if (listingCnt < numOfListing) {
listing = Arrays.copyOf(listing, listingCnt);
}
return new DirectoryListing(listing, totalNumChildren - startChild - listingCnt);
} finally {
fsd.readUnlock();
}
}
18
Source : FSDirRenameOp.java
with Apache License 2.0
from naver
/**
* The new rename which has the POSIX semantic.
*/
static Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> renameToInt(FSDirectory fsd, final String srcArg, final String dstArg, boolean logRetryCache, Options.Rename... options) throws IOException {
String src = srcArg;
String dst = dstArg;
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options -" + " " + src + " to " + dst);
}
if (!DFSUtil.isValidName(dst)) {
throw new InvalidPathException("Invalid name: " + dst);
}
final FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
src = fsd.resolvePath(pc, src, srcComponents);
dst = fsd.resolvePath(pc, dst, dstComponents);
renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);
return new AbstractMap.SimpleImmutableEntry<>(collectedBlocks, resultingStat);
}
18
Source : TestSetStoragePolicyAction.java
with Apache License 2.0
from Intel-bigdata
private byte setStoragePolicy(String file, String storagePolicy) throws IOException {
SetStoragePolicyAction action = new SetStoragePolicyAction();
action.setDfsClient(dfsClient);
action.setContext(smartContext);
Map<String, String> args = new HashMap();
args.put(SetStoragePolicyAction.FILE_PATH, file);
args.put(SetStoragePolicyAction.STORAGE_POLICY, storagePolicy);
action.init(args);
action.run();
Assert.assertTrue(action.getExpectedAfterRun());
HdfsFileStatus fileStatus = dfsClient.getFileInfo(file);
return fileStatus.getStoragePolicy();
}
18
Source : CheckSumAction.java
with Apache License 2.0
from Intel-bigdata
@Override
protected void execute() throws Exception {
// Use pre-set SmartDFSClient.
// this.setDfsClient(HadoopUtil.getDFSClient(
// HadoopUtil.getNameNodeUri(conf), conf));
if (fileName == null) {
throw new IllegalArgumentException("Please specify file path!");
}
if (fileName.charAt(fileName.length() - 1) == '*') {
DirectoryListing listing = dfsClient.listPaths(fileName.substring(0, fileName.length() - 1), HdfsFileStatus.EMPTY_NAME);
HdfsFileStatus[] fileList = listing.getPartialListing();
for (HdfsFileStatus fileStatus : fileList) {
String file1 = fileStatus.getFullPath(new Path(fileName.substring(0, fileName.length() - 1))).toString();
HdfsFileStatus fileStatus1 = dfsClient.getFileInfo(file1);
long length = fileStatus1.getLen();
MD5MD5CRC32FileChecksum md5 = dfsClient.getFileChecksum(file1, length);
ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
DataOutputStream dataStream = new DataOutputStream(byteStream);
md5.write(dataStream);
byte[] bytes = byteStream.toByteArray();
appendLog(String.format("%s\t%s\t%s", file1, md5.getAlgorithmName(), byteArray2HexString(bytes)));
}
return;
}
HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
if (fileStatus == null) {
throw new ActionException("File does not exist.");
}
if (fileStatus.isDir()) {
appendResult("This is a directory which has no checksum result!");
appendLog("This is a directory which has no checksum result!");
return;
}
long length = fileStatus.getLen();
MD5MD5CRC32FileChecksum md5 = dfsClient.getFileChecksum(fileName, length);
ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
DataOutputStream dataStream = new DataOutputStream(byteStream);
md5.write(dataStream);
byte[] bytes = byteStream.toByteArray();
appendLog(String.format("%s\t%s\t%s", fileName, md5.getAlgorithmName(), byteArray2HexString(bytes)));
}
17
Source : Nfs3Utils.java
with Apache License 2.0
from NJUJYB
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(HdfsFileStatus fs, IdMappingServiceProvider iug) {
/**
* Some 32-bit Linux clients have a problem with 64-bit fileIds: it seems the
* 32-bit client takes only the lower 32 bits of the fileId and treats it as a
* signed int. When the 32nd bit is 1, the client considers it invalid.
*/
NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
return new Nfs3FileAttributes(fileType, fs.getChildrenNum(), fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()), iug.getGidAllowingUnknown(fs.getGroup()), fs.getLen(), 0, /* fsid */
fs.getFileId(), fs.getModificationTime(), fs.getAccessTime());
}
17
Source : Nfs3Utils.java
with Apache License 2.0
from NJUJYB
public static Nfs3FileAttributes getFileAttr(DFSClient client, String fileIdPath, IdMappingServiceProvider iug) throws IOException {
HdfsFileStatus fs = getFileStatus(client, fileIdPath);
return fs == null ? null : getNfs3FileAttrFromFileStatus(fs, iug);
}
17
Source : Hdfs.java
with Apache License 2.0
from NJUJYB
@Override
public FileStatus getFileLinkStatus(Path f) throws IOException, UnresolvedLinkException {
HdfsFileStatus fi = dfs.getFileLinkInfo(getUriPath(f));
if (fi != null) {
return fi.makeQualified(getUri(), f);
} else {
throw new FileNotFoundException("File does not exist: " + f);
}
}
17
Source : Hdfs.java
with Apache License 2.0
from NJUJYB
@Override
public FileStatus getFileStatus(Path f) throws IOException, UnresolvedLinkException {
HdfsFileStatus fi = dfs.getFileInfo(getUriPath(f));
if (fi != null) {
return fi.makeQualified(getUri(), f);
} else {
throw new FileNotFoundException("File does not exist: " + f.toString());
}
}
17
Source : Nfs3Utils.java
with Apache License 2.0
from naver
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(HdfsFileStatus fs, IdMappingServiceProvider iug) {
/**
* Some 32-bit Linux clients have a problem with 64-bit fileIds: it seems the
* 32-bit client takes only the lower 32 bits of the fileId and treats it as a
* signed int. When the 32nd bit is 1, the client considers it invalid.
*/
NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs.getChildrenNum()) : fs.getLen();
return new Nfs3FileAttributes(fileType, nlink, fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()), iug.getGidAllowingUnknown(fs.getGroup()), size, 0, /* fsid */
fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(), new Nfs3FileAttributes.Specdata3());
}
17
Source : FSDirRenameOp.java
with Apache License 2.0
from naver
@Deprecated
static RenameOldResult renameToInt(FSDirectory fsd, final String srcArg, final String dstArg, boolean logRetryCache) throws IOException {
String src = srcArg;
String dst = dstArg;
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src + " to " + dst);
}
if (!DFSUtil.isValidName(dst)) {
throw new IOException("Invalid name: " + dst);
}
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
HdfsFileStatus resultingStat = null;
src = fsd.resolvePath(pc, src, srcComponents);
dst = fsd.resolvePath(pc, dst, dstComponents);
@SuppressWarnings("deprecation")
final boolean status = renameTo(fsd, pc, src, dst, logRetryCache);
if (status) {
INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
resultingStat = fsd.getAuditFileInfo(dstIIP);
}
return new RenameOldResult(status, resultingStat);
}
17
Source : SmartDFSClient.java
with Apache License 2.0
from Intel-bigdata
@Override
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
HdfsFileStatus fileStatus = super.getFileLinkInfo(src);
if (fileStatus.getLen() == 0) {
String target = super.getLinkTarget(src);
FileState fileState = getFileState(target);
if (fileState instanceof CompactFileState) {
fileStatus = getFileInfo(target);
}
}
return fileStatus;
}
17
Source : TestMoverExecutor.java
with Apache License 2.0
from Intel-bigdata
private List<LocatedBlock> getLocatedBlocks(DFSClient dfsClient, String fileName, FileMovePlan plan) throws IOException {
HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
if (fileStatus == null) {
throw new IOException("File does not exist.");
}
long length = fileStatus.getLen();
if (plan != null) {
plan.setFileLength(length);
}
return dfsClient.getLocatedBlocks(fileName, 0, length).getLocatedBlocks();
}
17
Source : TestMoveFileAction.java
with Apache License 2.0
from Intel-bigdata
private List<LocatedBlock> getLocatedBlocks(DFSClient dfsClient, String fileName, FileMovePlan plan) throws IOException {
HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
if (fileStatus == null) {
throw new IOException("File does not exist.");
}
long length = fileStatus.getLen();
plan.setFileLength(length);
return dfsClient.getLocatedBlocks(fileName, 0, length).getLocatedBlocks();
}
16
Source : TestLazyPersistFiles.java
with Apache License 2.0
from NJUJYB
@Test
public void testPolicyNotSetByDefault() throws IOException {
startUpCluster(false, -1);
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, 0, false);
// Stat the file and check that the LAZY_PERSIST policy is not
// returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
assertThat(status.getStoragePolicy(), not(LAZY_PERSIST_POLICY_ID));
}
16
Source : TestLazyPersistFiles.java
with Apache License 2.0
from NJUJYB
@Test
public void testPolicyPersistenceInFsImage() throws IOException {
startUpCluster(false, -1);
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, 0, true);
// checkpoint
fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(true);
// Stat the file and check that the lazyPersist flag is returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
16
Source : TestLazyPersistFiles.java
with Apache License 2.0
from NJUJYB
@Test
public void testPolicyPersistenceInEditLog() throws IOException {
startUpCluster(false, -1);
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, 0, true);
cluster.restartNameNode(true);
// Stat the file and check that the lazyPersist flag is returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
16
Source : TestLazyPersistFiles.java
with Apache License 2.0
from NJUJYB
@Test
public void testPolicyPropagation() throws IOException {
startUpCluster(false, -1);
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, 0, true);
// Stat the file and check that the lazyPersist flag is returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
16
Source : NamenodeFsck.java
with Apache License 2.0
from NJUJYB
private void lostFoundInit(DFSClient dfs) {
lfInited = true;
try {
String lfName = "/lost+found";
final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
if (lfStatus == null) {
// not exists
lfInitedOk = dfs.mkdirs(lfName, null, true);
lostFound = lfName;
} else if (!lfStatus.isDir()) {
// exists but not a directory
LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
lfInitedOk = false;
} else {
// exists and is a directory
lostFound = lfName;
lfInitedOk = true;
}
} catch (Exception e) {
e.printStackTrace();
lfInitedOk = false;
}
if (lostFound == null) {
LOG.warn("Cannot initialize /lost+found .");
lfInitedOk = false;
internalError = true;
}
}
15
Source : TestJsonUtil.java
with Apache License 2.0
from NJUJYB
@Test
public void testHdfsFileStatus() {
final long now = Time.now();
final String parent = "/dir";
final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26, now, now + 10, new FsPermission((short) 0644), "user", "group", DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"), INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
final FileStatus fstatus = toFileStatus(status, parent);
System.out.println("status = " + status);
System.out.println("fstatus = " + fstatus);
final String json = JsonUtil.toJsonString(status, true);
System.out.println("json = " + json.replace(",", ",\n "));
final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>) JSON.parse(json), true);
final FileStatus fs2 = toFileStatus(s2, parent);
System.out.println("s2 = " + s2);
System.out.println("fs2 = " + fs2);
Assert.assertEquals(fstatus, fs2);
}
15
Source : TestNamenodeRetryCache.java
with Apache License 2.0
from NJUJYB
/**
* Test for create file
*/
@Test
public void testCreate() throws Exception {
String src = "/testNamenodeRetryCache/testCreate/file";
// Two retried calls succeed
newCall();
HdfsFileStatus status = namesystem.startFile(src, perm, "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null);
Assert.assertEquals(status, namesystem.startFile(src, perm, "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null));
Assert.assertEquals(status, namesystem.startFile(src, perm, "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null));
// A non-retried call fails
newCall();
try {
namesystem.startFile(src, perm, "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null);
Assert.fail("testCreate - expected exception is not thrown");
} catch (IOException e) {
// expected
}
}
15
Source : TestNamenodeRetryCache.java
with Apache License 2.0
from naver
/**
* Test for create file
*/
@Test
public void testCreate() throws Exception {
String src = "/testNamenodeRetryCache/testCreate/file";
// Two retried calls succeed
newCall();
HdfsFileStatus status = nnRpc.create(src, perm, "holder", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, BlockSize, null);
Assert.assertEquals(status, nnRpc.create(src, perm, "holder", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, BlockSize, null));
Assert.assertEquals(status, nnRpc.create(src, perm, "holder", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, BlockSize, null));
// A non-retried call fails
newCall();
try {
nnRpc.create(src, perm, "holder", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, BlockSize, null);
Assert.fail("testCreate - expected exception is not thrown");
} catch (IOException e) {
// expected
}
}
15
Source : ErasureCodingBase.java
with Apache License 2.0
from Intel-bigdata
protected void convert(HdfsFileStatus srcFileStatus) throws ActionException {
DFSInputStream in = null;
DFSOutputStream out = null;
try {
long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
in = dfsClient.open(srcPath, bufferSize, true);
short replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, DFSConfigKeys.DFS_REPLICATION_DEFAULT);
// use the same FsPermission as srcPath
FsPermission permission = srcFileStatus.getPermission();
out = dfsClient.create(ecTmpPath, permission, EnumSet.of(CreateFlag.CREATE), true, replication, blockSize, null, bufferSize, null, null, ecPolicyName);
// Keep the storage policy consistent with the original file, except for the UNDEF storage policy
String storagePolicyName = dfsClient.getStoragePolicy(srcPath).getName();
if (!storagePolicyName.equals("UNDEF")) {
dfsClient.setStoragePolicy(ecTmpPath, storagePolicyName);
}
long bytesRemaining = srcFileStatus.getLen();
byte[] buf = new byte[bufferSize];
while (bytesRemaining > 0L) {
int bytesToRead = (int) (bytesRemaining < (long) buf.length ? bytesRemaining : (long) buf.length);
int bytesRead = in.read(buf, 0, bytesToRead);
if (bytesRead == -1) {
break;
}
out.write(buf, 0, bytesRead);
bytesRemaining -= (long) bytesRead;
this.progress = (float) (srcFileStatus.getLen() - bytesRemaining) / srcFileStatus.getLen();
}
} catch (Exception ex) {
throw new ActionException(ex);
} finally {
try {
if (in != null) {
in.close();
}
if (out != null) {
out.close();
}
} catch (IOException ex) {
LOG.error("IOException occurred when closing DFSInputStream or DFSOutputStream!");
}
}
}
15
Source : SmallFileUncompactAction.java
with Apache License 2.0
from Intel-bigdata
/**
* Reset meta data of small file. We should exclude the setting for
* xAttrNameFileState or xAttrNameCheckSum.
*/
private void resetFileMeta(String path, HdfsFileStatus fileStatus, Map<String, byte[]> xAttr) throws IOException {
dfsClient.setOwner(path, fileStatus.getOwner(), fileStatus.getGroup());
dfsClient.setPermission(path, fileStatus.getPermission());
for (Map.Entry<String, byte[]> entry : xAttr.entrySet()) {
if (!entry.getKey().equals(xAttrNameFileState) && !entry.getKey().equals(xAttrNameCheckSum)) {
dfsClient.setXAttr(path, entry.getKey(), entry.getValue(), EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
}
}
}
14
Source : TestDFSUpgradeFromImage.java
with Apache License 2.0
from NJUJYB
static void recoverAllLeases(DFSClient dfs, Path path) throws IOException {
String pathStr = path.toString();
HdfsFileStatus status = dfs.getFileInfo(pathStr);
if (!status.isDir()) {
dfs.recoverLease(pathStr);
return;
}
byte[] prev = HdfsFileStatus.EMPTY_NAME;
DirectoryListing dirList;
do {
dirList = dfs.listPaths(pathStr, prev);
HdfsFileStatus[] files = dirList.getPartialListing();
for (HdfsFileStatus f : files) {
recoverAllLeases(dfs, f.getFullPath(path));
}
prev = dirList.getLastName();
} while (dirList.hasMore());
}
14
Source : TestDefaultBlockPlacementPolicy.java
with Apache License 2.0
from NJUJYB
private void testPlacement(String clientMachine, String clientRack) throws AccessControlException, SafeModeException, FileAlreadyExistsException, UnresolvedLinkException, FileNotFoundException, ParentNotDirectoryException, IOException, NotReplicatedYetException {
// write 5 files and check whether the blocks are placed properly each time
for (int i = 0; i < 5; i++) {
String src = "/test-" + i;
// Create the file with client machine
HdfsFileStatus fileStatus = namesystem.startFile(src, perm, clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true, REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null);
LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine, null, null, fileStatus.getFileId(), null);
assertEquals("Block should be allocated sufficient locations", REPLICATION_FACTOR, locatedBlock.getLocations().length);
if (clientRack != null) {
assertEquals("First datanode should be rack local", clientRack, locatedBlock.getLocations()[0].getNetworkLocation());
}
nameNodeRpc.abandonBlock(locatedBlock.getBlock(), fileStatus.getFileId(), src, clientMachine);
}
}
14
Source : FileDataServlet.java
with Apache License 2.0
from NJUJYB
/**
* Create a redirection URL
*/
private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus status, UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request, String dt) throws IOException {
String scheme = request.getScheme();
final LocatedBlocks blks = nnproxy.getBlockLocations(status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
final Configuration conf = NameNodeHttpServer.getConfFromContext(getServletContext());
final DatanodeID host = pickSrcDatanode(blks, status, conf);
final String hostname;
if (host instanceof DatanodeInfo) {
hostname = host.getHostName();
} else {
hostname = host.getIpAddr();
}
int port = "https".equals(scheme) ? host.getInfoSecurePort() : host.getInfoPort();
String dtParam = "";
if (dt != null) {
dtParam = JspHelper.getDelegationTokenUrlParam(dt);
}
// Add namenode address to the url params
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(getServletContext());
String addr = nn.getNameNodeAddressHostPortString();
String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
return new URL(scheme, hostname, port, "/streamFile" + encodedPath + '?' + "ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) + dtParam + addrParam);
}
14
Source : TestJsonUtil.java
with Apache License 2.0
from naver
@Test
public void testHdfsFileStatus() throws IOException {
final long now = Time.now();
final String parent = "/dir";
final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26, now, now + 10, new FsPermission((short) 0644), "user", "group", DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"), INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
final FileStatus fstatus = toFileStatus(status, parent);
System.out.println("status = " + status);
System.out.println("fstatus = " + fstatus);
final String json = JsonUtil.toJsonString(status, true);
System.out.println("json = " + json.replace(",", ",\n "));
ObjectReader reader = new ObjectMapper().reader(Map.class);
final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
final FileStatus fs2 = toFileStatus(s2, parent);
System.out.println("s2 = " + s2);
System.out.println("fs2 = " + fs2);
Assert.assertEquals(fstatus, fs2);
}
14
Source : TestDefaultBlockPlacementPolicy.java
with Apache License 2.0
from naver
private void testPlacement(String clientMachine, String clientRack) throws IOException {
// write 5 files and check whether the blocks are placed properly each time
for (int i = 0; i < 5; i++) {
String src = "/test-" + i;
// Create the file with client machine
HdfsFileStatus fileStatus = namesystem.startFile(src, perm, clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true, REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, false);
LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine, null, null, fileStatus.getFileId(), null);
assertEquals("Block should be allocated sufficient locations", REPLICATION_FACTOR, locatedBlock.getLocations().length);
if (clientRack != null) {
assertEquals("First datanode should be rack local", clientRack, locatedBlock.getLocations()[0].getNetworkLocation());
}
nameNodeRpc.abandonBlock(locatedBlock.getBlock(), fileStatus.getFileId(), src, clientMachine);
}
}
14
Source : FSDirStatAndListingOp.java
with Apache License 2.0
from naver
/**
* Get a listing of all the snapshots of a snapshottable directory
*/
private static DirectoryListing getSnapshotsListing(FSDirectory fsd, String src, byte[] startAfter) throws IOException {
Preconditions.checkState(fsd.hasReadLock());
Preconditions.checkArgument(src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
final String dirPath = FSDirectory.normalizePath(src.substring(0, src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
final INode node = fsd.getINode(dirPath);
final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
if (sf == null) {
throw new SnapshotException("Directory is not a snapshottable directory: " + dirPath);
}
final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
for (int i = 0; i < numOfListing; i++) {
Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot, BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, INodesInPath.fromINode(sRoot));
}
return new DirectoryListing(listing, snapshots.size() - skipSize - numOfListing);
}
14
Source : TestSmartDFSClient.java
with Apache License 2.0
from Intel-bigdata
@Test
public void testSmallFile() throws Exception {
waitTillSSMExitSafeMode();
createSmallFiles();
SmartDFSClient smartDFSClient = new SmartDFSClient(smartContext.getConf());
BlockLocation[] blockLocations = smartDFSClient.getBlockLocations("/test/small_files/file_0", 0, 30);
Assert.assertEquals(blockLocations.length, 1);
HdfsFileStatus fileInfo = smartDFSClient.getFileInfo("/test/small_files/file_0");
Assert.assertEquals(9, fileInfo.getLen());
smartDFSClient.rename("/test/small_files/file_0", "/test/small_files/file_5");
Assert.assertTrue(!dfsClient.exists("/test/small_files/file_0"));
Assert.assertTrue(dfsClient.exists("/test/small_files/file_5"));
smartDFSClient.delete("/test/small_files/file_5", false);
Assert.assertTrue(!dfsClient.exists("/test/small_files/file_5"));
smartDFSClient.delete("/test/small_files", true);
Assert.assertTrue(!dfsClient.exists("/test/small_files/file_1"));
}
14
Source : ErasureCodingAction.java
with Apache License 2.0
from Intel-bigdata
@Override
protected void execute() throws Exception {
final String MATCH_RESULT = "The current EC policy is already matched with the target one.";
final String DIR_RESULT = "The EC policy is set successfully for the given directory.";
final String CONVERT_RESULT = "The file is converted successfully with the given or default EC policy.";
// Make sure DFSClient is used instead of SmartDFSClient.
this.setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
// keep attribute consistent
HdfsFileStatus fileStatus = dfsClient.getFileInfo(srcPath);
if (fileStatus == null) {
throw new ActionException("File doesn't exist!");
}
validateEcPolicy(ecPolicyName);
ErasureCodingPolicy srcEcPolicy = fileStatus.getErasureCodingPolicy();
// if the current ecPolicy is already the target one, no need to convert
if (srcEcPolicy != null) {
if (srcEcPolicy.getName().equals(ecPolicyName)) {
appendLog(MATCH_RESULT);
this.progress = 1.0F;
return;
}
} else {
// if ecPolicy is null, it means replication.
if (ecPolicyName.equals(REPLICATION_POLICY_NAME)) {
appendLog(MATCH_RESULT);
this.progress = 1.0F;
return;
}
}
if (fileStatus.isDir()) {
dfsClient.setErasureCodingPolicy(srcPath, ecPolicyName);
this.progress = 1.0F;
appendLog(DIR_RESULT);
return;
}
HdfsDataOutputStream outputStream = null;
try {
// Only a file with the replication policy can be appended.
if (srcEcPolicy == null) {
// Append to the file only to acquire the lease and prevent concurrent modification; no real appending occurs.
outputStream = dfsClient.append(srcPath, bufferSize, EnumSet.of(CreateFlag.APPEND), null, null);
}
convert(fileStatus);
/**
* The append operation will change the modification time accordingly,
* so we use the FileStatus obtained before the append to set most of the
* ecTmp file's attributes.
*/
setAttributes(srcPath, fileStatus, ecTmpPath);
dfsClient.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE);
appendLog(CONVERT_RESULT);
if (srcEcPolicy == null) {
appendLog("The previous EC policy is replication.");
} else {
appendLog(String.format("The previous EC policy is %s.", srcEcPolicy.getName()));
}
appendLog(String.format("The current EC policy is %s.", ecPolicyName));
} catch (ActionException ex) {
try {
if (dfsClient.getFileInfo(ecTmpPath) != null) {
dfsClient.delete(ecTmpPath, false);
}
} catch (IOException e) {
LOG.error("Failed to delete tmp file created during the conversion!");
}
throw new ActionException(ex);
} finally {
if (outputStream != null) {
try {
outputStream.close();
} catch (IOException ex) {
// Hide the expected exception that the original file is missing.
}
}
}
}
13
Source : TestClientAccessPrivilege.java
with Apache License 2.0
from NJUJYB
@Test(timeout = 60000)
public void testClientAccessPrivilegeForRemove() throws Exception {
// Configure ro access for nfs1 service
config.set("dfs.nfs.exports.allowed.hosts", "* ro");
// Start nfs
Nfs3 nfs = new Nfs3(config);
nfs.startServiceInternal(false);
RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
// Create a remove request
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeString("f1");
// Remove operation
REMOVE3Response response = nfsd.remove(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
// Assert on return code
assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response.getStatus());
}
13
Source : ErasureCodingBase.java
with Apache License 2.0
from Intel-bigdata
// Set attributes on dest to keep them consistent with their counterparts on src
protected void setAttributes(String src, HdfsFileStatus fileStatus, String dest) throws IOException {
dfsClient.setOwner(dest, fileStatus.getOwner(), fileStatus.getGroup());
dfsClient.setPermission(dest, fileStatus.getPermission());
dfsClient.setStoragePolicy(dest, dfsClient.getStoragePolicy(src).getName());
dfsClient.setTimes(dest, fileStatus.getModificationTime(), fileStatus.getAccessTime());
boolean aclsEnabled = getContext().getConf().getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT);
if (aclsEnabled) {
dfsClient.setAcl(dest, dfsClient.getAclStatus(src).getEntries());
}
// TODO: check ec related record to avoid paradox
for (Map.Entry<String, byte[]> entry : dfsClient.getXAttrs(src).entrySet()) {
dfsClient.setXAttr(dest, entry.getKey(), entry.getValue(), EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
}
}
12
Source : TestFileStatus.java
with Apache License 2.0
from NJUJYB
/**
* Test calling getFileInfo directly on the client
*/
@Test
public void testGetFileInfo() throws IOException {
// Check that / exists
Path path = new Path("/");
assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());
// Make sure getFileInfo returns null for files which do not exist
HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
assertEquals("Non-existent file should result in null", null, fileInfo);
Path path1 = new Path("/name1");
Path path2 = new Path("/name1/name2");
assertTrue(fs.mkdirs(path1));
FSDataOutputStream out = fs.create(path2, false);
out.close();
fileInfo = dfsClient.getFileInfo(path1.toString());
assertEquals(1, fileInfo.getChildrenNum());
fileInfo = dfsClient.getFileInfo(path2.toString());
assertEquals(0, fileInfo.getChildrenNum());
// Test getFileInfo throws the right exception given a non-absolute path.
try {
dfsClient.getFileInfo("non-absolute");
fail("getFileInfo for a non-absolute path did not throw IOException");
} catch (RemoteException re) {
assertTrue("Wrong exception for invalid file name", re.toString().contains("Invalid file name"));
}
}
12
Source : TestCompressionAction.java
with Apache License 2.0
from Intel-bigdata
@Test
public void testExecute() throws Exception {
String filePath = "/testCompressFile/fadsfa/213";
int bufferSize = 1024 * 128;
// String compressionImpl = "Lz4";
// String compressionImpl = "Bzip2";
// String compressionImpl = "Zlib";
byte[] bytes = TestCompressionAction.BytesGenerator.get(bufferSize);
short replication = 3;
long blockSize = DEFAULT_BLOCK_SIZE;
// Create HDFS file
OutputStream outputStream = dfsClient.create(filePath, true, replication, blockSize);
outputStream.write(bytes);
outputStream.close();
dfsClient.setStoragePolicy(filePath, "COLD");
HdfsFileStatus srcFileStatus = dfsClient.getFileInfo(filePath);
// Generate compressed file
String bufferSizeForCompression = "10MB";
compression(filePath, bufferSizeForCompression);
// Check HdfsFileStatus
HdfsFileStatus fileStatus = dfsClient.getFileInfo(filePath);
Assert.assertEquals(replication, fileStatus.getReplication());
Assert.assertEquals(blockSize, fileStatus.getBlockSize());
// 0 means unspecified.
if (srcFileStatus.getStoragePolicy() != 0) {
// To make sure the consistency of storage policy
Assert.assertEquals(srcFileStatus.getStoragePolicy(), fileStatus.getStoragePolicy());
}
}
12
Source : MoverExecutor.java
with Apache License 2.0
from Intel-bigdata
/**
* Execute a move action providing the schedule plan
* @param plan the schedule plan of mover
* @return number of failed moves
* @throws Exception
*/
public int executeMove(FileMovePlan plan, PrintStream resultOs, PrintStream logOs) throws Exception {
if (plan == null) {
throw new RuntimeException("Schedule plan for mover is null");
}
init(plan);
HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
if (fileStatus == null) {
throw new RuntimeException("File does not exist.");
}
if (fileStatus.isDir()) {
throw new RuntimeException("File path is a directory.");
}
if ((plan.getFileId() != 0 && fileStatus.getFileId() != plan.getFileId()) || fileStatus.getLen() < plan.getFileLength()) {
throw new RuntimeException("File has been changed after this action generated.");
}
locatedBlocks = dfsClient.getLocatedBlocks(fileName, 0, plan.getFileLength()).getLocatedBlocks();
parseSchedulePlan(plan);
concurrentMoves = allMoves.size() >= maxConcurrentMoves ? maxConcurrentMoves : allMoves.size();
concurrentMoves = concurrentMoves == 0 ? 1 : concurrentMoves;
moveExecutor = Executors.newFixedThreadPool(concurrentMoves);
try {
instances.incrementAndGet();
return doMove(resultOs, logOs);
} finally {
instances.decrementAndGet();
moveExecutor.shutdown();
moveExecutor = null;
}
}
11
Source : TestRpcProgramNfs3.java
with Apache License 2.0
from NJUJYB
@Test(timeout = 60000)
public void testLookup() throws Exception {
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
long dirId = status.getFileId();
FileHandle handle = new FileHandle(dirId);
LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
XDR xdr_req = new XDR();
lookupReq.serialize(xdr_req);
// Attempt by an unprivileged user should fail.
LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());
// Attempt by a privileged user should pass.
LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response2.getStatus());
}
11
Source : TestRpcProgramNfs3.java
with Apache License 2.0
from NJUJYB
@Test(timeout = 60000)
public void testRename() throws Exception {
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
RENAME3Request req = new RENAME3Request(handle, "bar", handle, "fubar");
req.serialize(xdr_req);
// Attempt by an unprivileged user should fail.
RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());
// Attempt by a privileged user should pass.
RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus());
}
11
Source : TestRpcProgramNfs3.java
with Apache License 2.0
from NJUJYB
private void createFileUsingNfs(String fileName, byte[] buffer) throws Exception {
DFSTestUtil.createFile(hdfs, new Path(fileName), 0, (short) 1, 0);
final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
final long dirId = status.getFileId();
final FileHandle handle = new FileHandle(dirId);
final WRITE3Request writeReq = new WRITE3Request(handle, 0, buffer.length, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
final XDR xdr_req = new XDR();
writeReq.serialize(xdr_req);
final WRITE3Response response = nfsd.write(xdr_req.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect response: ", null, response);
}
11
Source : NamenodeFsck.java
with Apache License 2.0
from NJUJYB
/**
* Check files on DFS, starting from the indicated path.
*/
public void fsck() {
final long startTime = Time.now();
try {
String msg = "FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " for path " + path + " at " + new Date();
LOG.info(msg);
out.println(msg);
namenode.getNamesystem().logFsckEvent(path, remoteAddress);
if (snapshottableDirs != null) {
SnapshottableDirectoryStatus[] snapshotDirs = namenode.getRpcServer().getSnapshottableDirListing();
if (snapshotDirs != null) {
for (SnapshottableDirectoryStatus dir : snapshotDirs) {
snapshottableDirs.add(dir.getFullPath().toString());
}
}
}
final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);
if (file != null) {
if (showCorruptFileBlocks) {
listCorruptFileBlocks();
return;
}
Result res = new Result(conf);
check(path, file, res);
out.println(res);
out.println(" Number of data-nodes:\t\t" + totalDatanodes);
out.println(" Number of racks:\t\t" + networktopology.getNumOfRacks());
out.println("FSCK ended at " + new Date() + " in " + (Time.now() - startTime + " milliseconds"));
// If there were internal errors during the fsck operation, we want to
// return FAILURE_STATUS, even if those errors were not immediately
// fatal. Otherwise many unit tests will pass even when there are bugs.
if (internalError) {
throw new IOException("fsck encountered internal errors!");
}
// DFSck client scans for the string HEALTHY/CORRUPT to check the status
// of file system and return appropriate code. Changing the output
// string might break testcases. Also note this must be the last line
// of the report.
if (res.isHealthy()) {
out.print("\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS);
} else {
out.print("\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS);
}
} else {
out.print("\n\nPath '" + path + "' " + NONEXISTENT_STATUS);
}
} catch (Exception e) {
String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS;
LOG.warn(errMsg, e);
out.println("FSCK ended at " + new Date() + " in " + (Time.now() - startTime + " milliseconds"));
out.println(e.getMessage());
out.print("\n\n" + errMsg);
} finally {
out.close();
}
}