Here are examples of the Java API org.apache.hadoop.io.IOUtils.cleanup(), taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
239 Examples
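For orientation before the examples: the signature is cleanup(Log log, Closeable... closeables). Below is a minimal sketch of the method's contract, modeled on the Hadoop 2.x sources; treat the body as an approximation rather than the authoritative implementation.

// Approximate behavior of org.apache.hadoop.io.IOUtils.cleanup().
// Null entries are skipped, and exceptions thrown by close() are never
// rethrown; they are logged at debug level when a log is supplied, and
// dropped silently when the log argument is null (common in the tests below).
public static void cleanup(Log log, java.io.Closeable... closeables) {
    for (java.io.Closeable c : closeables) {
        if (c != null) {
            try {
                c.close();
            } catch (Throwable e) {
                if (log != null && log.isDebugEnabled()) {
                    log.debug("Exception in closing " + c, e);
                }
            }
        }
    }
}

This makes cleanup() well suited to finally blocks and error paths, where a secondary close() failure should not mask the original exception.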
19
Source : IndexRRecordReader.java
with Apache License 2.0
from shunfei
@Override
public void close() throws IOException {
    IOUtils.cleanup(LOG, segment);
}
19
Source : FileSystemRMStateStore.java
with Apache License 2.0
from NJUJYB
/*
 * In order to make this write atomic, as part of the write we first write
 * the data to a .tmp file and then rename it. Here we are assuming that
 * rename is atomic for the underlying file system.
 */
private void writeFile(Path outputPath, byte[] data) throws Exception {
    Path tempPath = new Path(outputPath.getParent(), outputPath.getName() + ".tmp");
    FSDataOutputStream fsOut = null;
    // This file will be overwritten when app/attempt finishes for saving the
    // final status.
    try {
        fsOut = fs.create(tempPath, true);
        fsOut.write(data);
        fsOut.close();
        fsOut = null;
        fs.rename(tempPath, outputPath);
    } finally {
        IOUtils.cleanup(LOG, fsOut);
    }
}
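Note the idiom in the example above: the stream is closed explicitly on the success path, so a failure in close() still propagates, and the reference is then nulled so that the cleanup() call in the finally block becomes a no-op. Condensed (the stream and names here are illustrative, not taken from the project above):

OutputStream out = null;
try {
    out = fs.create(path, true);
    out.write(data);
    out.close();  // success path: close() errors still propagate
    out = null;   // cleanup() below now sees null and does nothing
} finally {
    IOUtils.cleanup(LOG, out);  // error path: best-effort close, logged only
}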
19
Source : FileSystemRMStateStore.java
with Apache License 2.0
from NJUJYB
private byte[] readFile(Path inputPath, long len) throws Exception {
    FSDataInputStream fsIn = null;
    try {
        fsIn = fs.open(inputPath);
        // state data will not be that "long"
        byte[] data = new byte[(int) len];
        fsIn.readFully(data);
        return data;
    } finally {
        IOUtils.cleanup(LOG, fsIn);
    }
}
19
Source : TestLeveldbTimelineStore.java
with Apache License 2.0
from NJUJYB
private boolean deleteNextEntity(String entityType, byte[] ts) throws IOException, InterruptedException {
    LeveldbIterator iterator = null;
    LeveldbIterator pfIterator = null;
    try {
        iterator = ((LeveldbTimelineStore) store).getDbIterator(false);
        pfIterator = ((LeveldbTimelineStore) store).getDbIterator(false);
        return ((LeveldbTimelineStore) store).deleteNextEntity(entityType, ts, iterator, pfIterator, false);
    } catch (DBException e) {
        throw new IOException(e);
    } finally {
        IOUtils.cleanup(null, iterator, pfIterator);
    }
}
19
Source : ApplicationMaster.java
with Apache License 2.0
from NJUJYB
/**
 * Dump out contents of $CWD and the environment to stdout for debugging
 */
private void dumpOutDebugInfo() {
    LOG.info("Dump debug output");
    Map<String, String> envs = System.getenv();
    for (Map.Entry<String, String> env : envs.entrySet()) {
        LOG.info("System env: key=" + env.getKey() + ", val=" + env.getValue());
        System.out.println("System env: key=" + env.getKey() + ", val=" + env.getValue());
    }
    BufferedReader buf = null;
    try {
        String lines = Shell.WINDOWS ? Shell.execCommand("cmd", "/c", "dir") : Shell.execCommand("ls", "-al");
        buf = new BufferedReader(new StringReader(lines));
        String line = "";
        while ((line = buf.readLine()) != null) {
            LOG.info("System CWD content: " + line);
            System.out.println("System CWD content: " + line);
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        IOUtils.cleanup(LOG, buf);
    }
}
19
Source : TraceBuilder.java
with Apache License 2.0
from NJUJYB
void finish() {
    IOUtils.cleanup(LOG, traceWriter, topologyWriter);
}
19
Source : ReadRecordFactory.java
with Apache License 2.0
from NJUJYB
@Override
public void close() throws IOException {
    IOUtils.cleanup(null, src);
    factory.close();
}
19
Source : Gridmix.java
with Apache License 2.0
from NJUJYB
/**
 * @param conf gridmix configuration
 * @param traceIn trace file path (if it is '-', then trace comes from the
 *          stream stdin)
 * @param ioPath Working directory for gridmix. GenerateData job
 *          will generate data in the directory <ioPath>/input/ and
 *          distributed cache data is generated in the directory
 *          <ioPath>/distributedCache/, if -generate option is
 *          specified.
 * @param genbytes size of input data to be generated under the directory
 *          <ioPath>/input/
 * @param userResolver gridmix user resolver
 * @return exit code
 * @throws IOException
 * @throws InterruptedException
 */
int start(Configuration conf, String traceIn, Path ioPath, long genbytes, UserResolver userResolver) throws IOException, InterruptedException {
    DataStatistics stats = null;
    InputStream trace = null;
    int exitCode = 0;
    try {
        Path scratchDir = new Path(ioPath, conf.get(GRIDMIX_OUT_DIR, "gridmix"));
        // add shutdown hook for SIGINT, etc.
        Runtime.getRuntime().addShutdownHook(sdh);
        CountDownLatch startFlag = new CountDownLatch(1);
        try {
            // Create, start job submission threads
            startThreads(conf, traceIn, ioPath, scratchDir, startFlag, userResolver);
            Path inputDir = getGridmixInputDataPath(ioPath);
            // Write input data if specified
            exitCode = writeInputData(genbytes, inputDir);
            if (exitCode != 0) {
                return exitCode;
            }
            // publish the data statistics
            stats = GenerateData.publishDataStatistics(inputDir, genbytes, conf);
            // scan input dir contents
            submitter.refreshFilePool();
            boolean shouldGenerate = (genbytes > 0);
            // set up the needed things for emulation of various loads
            exitCode = setupEmulation(conf, traceIn, scratchDir, ioPath, shouldGenerate);
            if (exitCode != 0) {
                return exitCode;
            }
            // start the summarizer
            summarizer.start(conf);
            factory.start();
            statistics.start();
        } catch (Throwable e) {
            LOG.error("Startup failed. " + e.toString() + "\n");
            if (LOG.isDebugEnabled()) {
                e.printStackTrace();
            }
            // abort pipeline
            if (factory != null)
                factory.abort();
            exitCode = STARTUP_FAILED_ERROR;
        } finally {
            // signal for factory to start; sets start time
            startFlag.countDown();
        }
        if (factory != null) {
            // wait for input exhaustion
            factory.join(Long.MAX_VALUE);
            final Throwable badTraceException = factory.error();
            if (null != badTraceException) {
                LOG.error("Error in trace", badTraceException);
                throw new IOException("Error in trace", badTraceException);
            }
            // wait for pending tasks to be submitted
            submitter.shutdown();
            submitter.join(Long.MAX_VALUE);
            // wait for running tasks to complete
            monitor.shutdown();
            monitor.join(Long.MAX_VALUE);
            statistics.shutdown();
            statistics.join(Long.MAX_VALUE);
        }
    } finally {
        if (factory != null) {
            summarizer.finalize(factory, traceIn, genbytes, userResolver, stats, conf);
        }
        IOUtils.cleanup(LOG, trace);
    }
    return exitCode;
}
19
Source : TestGlobbedCopyListing.java
with Apache License 2.0
from NJUJYB
private static void mkdirs(String path) throws Exception {
    FileSystem fileSystem = null;
    try {
        fileSystem = cluster.getFileSystem();
        fileSystem.mkdirs(new Path(path));
        recordInExpectedValues(path);
    } finally {
        IOUtils.cleanup(null, fileSystem);
    }
}
19
Source : TestGlobbedCopyListing.java
with Apache License 2.0
from NJUJYB
private static void touchFile(String path) throws Exception {
    FileSystem fileSystem = null;
    DataOutputStream outputStream = null;
    try {
        fileSystem = cluster.getFileSystem();
        outputStream = fileSystem.create(new Path(path), true, 0);
        recordInExpectedValues(path);
    } finally {
        IOUtils.cleanup(null, fileSystem, outputStream);
    }
}
19
Source : TestUniformSizeInputFormat.java
with Apache License 2.0
from NJUJYB
private static int createFile(String path, int fileSize) throws Exception {
    FileSystem fileSystem = null;
    DataOutputStream outputStream = null;
    try {
        fileSystem = cluster.getFileSystem();
        outputStream = fileSystem.create(new Path(path), true, 0);
        int size = (int) Math.ceil(fileSize + (1 - random.nextFloat()) * fileSize);
        outputStream.write(new byte[size]);
        return size;
    } finally {
        IOUtils.cleanup(null, fileSystem, outputStream);
    }
}
19
Source : TestCopyMapper.java
with Apache License 2.0
from NJUJYB
private static void touchFile(String path, boolean createMultipleBlocks, ChecksumOpt checksumOpt) throws Exception {
    FileSystem fs;
    DataOutputStream outputStream = null;
    try {
        fs = cluster.getFileSystem();
        final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(), fs.getWorkingDirectory());
        final long blockSize = createMultipleBlocks ? NON_DEFAULT_BLOCK_SIZE : fs.getDefaultBlockSize(qualifiedPath) * 2;
        FsPermission permission = FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(fs.getConf()));
        outputStream = fs.create(qualifiedPath, permission, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 0, (short) (fs.getDefaultReplication(qualifiedPath) * 2), blockSize, null, checksumOpt);
        byte[] bytes = new byte[DEFAULT_FILE_SIZE];
        outputStream.write(bytes);
        long fileSize = DEFAULT_FILE_SIZE;
        if (createMultipleBlocks) {
            while (fileSize < 2 * blockSize) {
                outputStream.write(bytes);
                outputStream.flush();
                fileSize += DEFAULT_FILE_SIZE;
            }
        }
        pathList.add(qualifiedPath);
        ++nFiles;
        FileStatus fileStatus = fs.getFileStatus(qualifiedPath);
        System.out.println(fileStatus.getBlockSize());
        System.out.println(fileStatus.getReplication());
    } finally {
        IOUtils.cleanup(null, outputStream);
    }
}
19
Source : TestDynamicInputFormat.java
with Apache License 2.0
from NJUJYB
private static void createFile(String path) throws Exception {
    FileSystem fileSystem = null;
    DataOutputStream outputStream = null;
    try {
        fileSystem = cluster.getFileSystem();
        outputStream = fileSystem.create(new Path(path), true, 0);
        expectedFilePaths.add(fileSystem.listStatus(new Path(path))[0].getPath().toString());
    } finally {
        IOUtils.cleanup(null, fileSystem, outputStream);
    }
}
19
Source : SimpleCopyListing.java
with Apache License 2.0
from NJUJYB
/**
 * Collect the list of
 * <sourceRelativePath, sourceFileStatus>
 * to be copied and write to the sequence file. In essence, any file or
 * directory that needs to be copied or sync-ed is written as an entry to
 * the sequence file, with the possible exception of the source root:
 * when either -update (sync) or -overwrite switch is specified, and if
 * the source root is a directory, then the source root entry is not
 * written to the sequence file, because only the contents of the source
 * directory need to be copied in this case.
 * See {@link org.apache.hadoop.tools.util.DistCpUtils#getRelativePath} for
 * how the relative path is computed.
 * See the computeSourceRootPath method for how the root path of the source
 * is computed.
 * @param fileListWriter
 * @param options
 * @throws IOException
 */
@VisibleForTesting
public void doBuildListing(SequenceFile.Writer fileListWriter, DistCpOptions options) throws IOException {
    try {
        for (Path path : options.getSourcePaths()) {
            FileSystem sourceFS = path.getFileSystem(getConf());
            final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL);
            final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR);
            final boolean preserveRawXAttrs = options.shouldPreserveRawXattrs();
            path = makeQualified(path);
            FileStatus rootStatus = sourceFS.getFileStatus(path);
            Path sourcePathRoot = computeSourceRootPath(rootStatus, options);
            FileStatus[] sourceFiles = sourceFS.listStatus(path);
            boolean explore = (sourceFiles != null && sourceFiles.length > 0);
            if (!explore || rootStatus.isDirectory()) {
                CopyListingFileStatus rootCopyListingStatus = DistCpUtils.toCopyListingFileStatus(sourceFS, rootStatus, preserveAcls, preserveXAttrs, preserveRawXAttrs);
                writeToFileListingRoot(fileListWriter, rootCopyListingStatus, sourcePathRoot, options);
            }
            if (explore) {
                for (FileStatus sourceStatus : sourceFiles) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Recording source-path: " + sourceStatus.getPath() + " for copy.");
                    }
                    CopyListingFileStatus sourceCopyListingStatus = DistCpUtils.toCopyListingFileStatus(sourceFS, sourceStatus, preserveAcls && sourceStatus.isDirectory(), preserveXAttrs && sourceStatus.isDirectory(), preserveRawXAttrs && sourceStatus.isDirectory());
                    writeToFileListing(fileListWriter, sourceCopyListingStatus, sourcePathRoot, options);
                    if (isDirectoryAndNotEmpty(sourceFS, sourceStatus)) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Traversing non-empty source dir: " + sourceStatus.getPath());
                        }
                        traverseNonEmptyDirectory(fileListWriter, sourceStatus, sourcePathRoot, options);
                    }
                }
            }
        }
        fileListWriter.close();
        fileListWriter = null;
    } finally {
        IOUtils.cleanup(LOG, fileListWriter);
    }
}
19
Source : RetriableFileCopyCommand.java
with Apache License 2.0
from NJUJYB
@VisibleForTesting
long copyBytes(FileStatus sourceFileStatus, long sourceOffset, OutputStream outStream, int bufferSize, Mapper.Context context) throws IOException {
    Path source = sourceFileStatus.getPath();
    byte[] buf = new byte[bufferSize];
    ThrottledInputStream inStream = null;
    long totalBytesRead = 0;
    try {
        inStream = getInputStream(source, context.getConfiguration());
        int bytesRead = readBytes(inStream, buf, sourceOffset);
        while (bytesRead >= 0) {
            totalBytesRead += bytesRead;
            if (action == FileAction.APPEND) {
                sourceOffset += bytesRead;
            }
            outStream.write(buf, 0, bytesRead);
            updateContextStatus(totalBytesRead, context, sourceFileStatus);
            bytesRead = readBytes(inStream, buf, sourceOffset);
        }
        outStream.close();
        outStream = null;
    } finally {
        IOUtils.cleanup(LOG, outStream, inStream);
    }
    return totalBytesRead;
}
19
Source : DynamicInputChunk.java
with Apache License 2.0
from NJUJYB
/**
 * Closes streams opened to the chunk-file.
 */
public void close() {
    IOUtils.cleanup(LOG, reader, writer);
}
19
Source : HistoryServerFileSystemStateStoreService.java
with Apache License 2.0
from NJUJYB
private byte[] readFile(Path file, long numBytes) throws IOException {
    byte[] data = new byte[(int) numBytes];
    FSDataInputStream in = fs.open(file);
    try {
        in.readFully(data);
    } finally {
        IOUtils.cleanup(LOG, in);
    }
    return data;
}
19
Source : HistoryServerFileSystemStateStoreService.java
with Apache License 2.0
from NJUJYB
private void writeFile(Path file, byte[] data) throws IOException {
    final int WRITE_BUFFER_SIZE = 4096;
    FSDataOutputStream out = fs.create(file, FILE_PERMISSIONS, true, WRITE_BUFFER_SIZE, fs.getDefaultReplication(file), fs.getDefaultBlockSize(file), null);
    try {
        try {
            out.write(data);
            out.close();
            out = null;
        } finally {
            IOUtils.cleanup(LOG, out);
        }
    } catch (IOException e) {
        fs.delete(file, false);
        throw e;
    }
}
19
Source : OnDiskMapOutput.java
with Apache License 2.0
from NJUJYB
@Override
public void shuffle(MapHost host, InputStream input, long compressedLength, long decompressedLength, ShuffleClientMetrics metrics, Reporter reporter) throws IOException {
    input = new IFileInputStream(input, compressedLength, conf);
    // Copy data to local-disk
    long bytesLeft = compressedLength;
    try {
        final int BYTES_TO_READ = 64 * 1024;
        byte[] buf = new byte[BYTES_TO_READ];
        while (bytesLeft > 0) {
            int n = ((IFileInputStream) input).readWithChecksum(buf, 0, (int) Math.min(bytesLeft, BYTES_TO_READ));
            if (n < 0) {
                throw new IOException("read past end of stream reading " + getMapId());
            }
            disk.write(buf, 0, n);
            bytesLeft -= n;
            metrics.inputBytes(n);
            reporter.progress();
        }
        LOG.info("Read " + (compressedLength - bytesLeft) + " bytes from map-output for " + getMapId());
        disk.close();
    } catch (IOException ioe) {
        // Close the streams
        IOUtils.cleanup(LOG, input, disk);
        // Re-throw
        throw ioe;
    }
    // Sanity check
    if (bytesLeft != 0) {
        throw new IOException("Incomplete map output received for " + getMapId() + " from " + host.getHostName() + " (" + bytesLeft + " bytes missing of " + compressedLength + ")");
    }
    this.compressedSize = compressedLength;
}
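The example above shows the complementary pattern to a finally block: the streams are cleaned up inside the catch clause and the original exception is rethrown, so on success the caller keeps the streams open for later use. A condensed sketch (the names, including the copyAll helper, are illustrative, not from the project above):

try {
    copyAll(input, disk);  // hypothetical helper standing in for the copy loop
    disk.close();
} catch (IOException ioe) {
    IOUtils.cleanup(LOG, input, disk);  // best-effort close without masking ioe
    throw ioe;
}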
19
Source : InMemoryMapOutput.java
with Apache License 2.0
from NJUJYB
@Override
public void shuffle(MapHost host, InputStream input, long compressedLength, long decompressedLength, ShuffleClientMetrics metrics, Reporter reporter) throws IOException {
    IFileInputStream checksumIn = new IFileInputStream(input, compressedLength, conf);
    input = checksumIn;
    // Are map-outputs compressed?
    if (codec != null) {
        decompressor.reset();
        input = codec.createInputStream(input, decompressor);
    }
    try {
        IOUtils.readFully(input, memory, 0, memory.length);
        metrics.inputBytes(memory.length);
        reporter.progress();
        LOG.info("Read " + memory.length + " bytes from map-output for " + getMapId());
        /**
         * We've gotten the amount of data we were expecting. Verify the
         * decompressor has nothing more to offer. This action also forces the
         * decompressor to read any trailing bytes that weren't critical
         * for decompression, which is necessary to keep the stream
         * in sync.
         */
        if (input.read() >= 0) {
            throw new IOException("Unexpected extra bytes from input stream for " + getMapId());
        }
    } catch (IOException ioe) {
        // Close the streams
        IOUtils.cleanup(LOG, input);
        // Re-throw
        throw ioe;
    } finally {
        CodecPool.returnDecompressor(decompressor);
    }
}
19
Source : EventWriter.java
with Apache License 2.0
from NJUJYB
void close() throws IOException {
    try {
        encoder.flush();
        out.close();
        out = null;
    } finally {
        IOUtils.cleanup(LOG, out);
    }
}
19
Source : TaskLog.java
with Apache License 2.0
from NJUJYB
private static LogFileDetail getLogFileDetail(TaskAttemptID taskid, LogName filter, boolean isCleanup) throws IOException {
    File indexFile = getIndexFile(taskid, isCleanup);
    BufferedReader fis = new BufferedReader(new InputStreamReader(SecureIOUtils.openForRead(indexFile, obtainLogDirOwner(taskid), null), Charsets.UTF_8));
    // the format of the index file is
    // LOG_DIR: <the dir where the task logs are really stored>
    // stdout:<start-offset in the stdout file> <length>
    // stderr:<start-offset in the stderr file> <length>
    // syslog:<start-offset in the syslog file> <length>
    LogFileDetail l = new LogFileDetail();
    String str = null;
    try {
        str = fis.readLine();
        if (str == null) {
            // the file doesn't have anything
            throw new IOException("Index file for the log of " + taskid + " doesn't exist.");
        }
        l.location = str.substring(str.indexOf(LogFileDetail.LOCATION) + LogFileDetail.LOCATION.length());
        // special cases are the debugout and profile.out files. They are
        // guaranteed to be associated with each task attempt since jvm reuse
        // is disabled when profiling/debugging is enabled
        if (filter.equals(LogName.DEBUGOUT) || filter.equals(LogName.PROFILE)) {
            l.length = new File(l.location, filter.toString()).length();
            l.start = 0;
            fis.close();
            return l;
        }
        str = fis.readLine();
        while (str != null) {
            // look for the exact line containing the logname
            if (str.contains(filter.toString())) {
                str = str.substring(filter.toString().length() + 1);
                String[] startAndLen = str.split(" ");
                l.start = Long.parseLong(startAndLen[0]);
                l.length = Long.parseLong(startAndLen[1]);
                break;
            }
            str = fis.readLine();
        }
        fis.close();
        fis = null;
    } finally {
        IOUtils.cleanup(LOG, fis);
    }
    return l;
}
19
Source : TaskLog.java
with Apache License 2.0
from NJUJYB
private static synchronized void writeToIndexFile(String logLocation, boolean isCleanup) throws IOException {
    // To ensure atomicity of updates to index file, write to temporary index
    // file first and then rename.
    File tmpIndexFile = getTmpIndexFile(currentTaskid, isCleanup);
    BufferedOutputStream bos = null;
    DataOutputStream dos = null;
    try {
        bos = new BufferedOutputStream(SecureIOUtils.createForWrite(tmpIndexFile, 0644));
        dos = new DataOutputStream(bos);
        // the format of the index file is
        // LOG_DIR: <the dir where the task logs are really stored>
        // STDOUT: <start-offset in the stdout file> <length>
        // STDERR: <start-offset in the stderr file> <length>
        // SYSLOG: <start-offset in the syslog file> <length>
        dos.writeBytes(LogFileDetail.LOCATION + logLocation + "\n" + LogName.STDOUT.toString() + ":");
        dos.writeBytes(Long.toString(prevOutLength) + " ");
        dos.writeBytes(Long.toString(new File(logLocation, LogName.STDOUT.toString()).length() - prevOutLength) + "\n" + LogName.STDERR + ":");
        dos.writeBytes(Long.toString(prevErrLength) + " ");
        dos.writeBytes(Long.toString(new File(logLocation, LogName.STDERR.toString()).length() - prevErrLength) + "\n" + LogName.SYSLOG.toString() + ":");
        dos.writeBytes(Long.toString(prevLogLength) + " ");
        dos.writeBytes(Long.toString(new File(logLocation, LogName.SYSLOG.toString()).length() - prevLogLength) + "\n");
        dos.close();
        dos = null;
        bos.close();
        bos = null;
    } finally {
        IOUtils.cleanup(LOG, dos, bos);
    }
    File indexFile = getIndexFile(currentTaskid, isCleanup);
    Path indexFilePath = new Path(indexFile.getAbsolutePath());
    Path tmpIndexFilePath = new Path(tmpIndexFile.getAbsolutePath());
    if (localFS == null) {
        // set localFS once
        localFS = FileSystem.getLocal(new Configuration());
    }
    localFS.rename(tmpIndexFilePath, indexFilePath);
}
19
Source : YarnChild.java
with Apache License 2.0
from NJUJYB
/**
 * Write the task specific job-configuration file.
 * @throws IOException
 */
private static void writeLocalJobFile(Path jobFile, JobConf conf) throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);
    localFs.delete(jobFile);
    OutputStream out = null;
    try {
        out = FileSystem.create(localFs, jobFile, urw_gr);
        conf.writeXml(out);
    } finally {
        IOUtils.cleanup(LOG, out);
    }
}
19
Source : OpenFileCtx.java
with Apache License 2.0
from NJUJYB
/**
 * Honor 2 kinds of overwrites: 1) support applications like touch (writing
 * the same content back to change mtime); 2) the client somehow sends the
 * same write again in a different RPC.
 */
private WRITE3Response processPerfectOverWrite(DFSClient dfsClient, long offset, int count, WriteStableHow stableHow, byte[] data, String path, WccData wccData, IdMappingServiceProvider iug) {
    WRITE3Response response;
    // Read the content back
    byte[] readbuffer = new byte[count];
    int readCount = 0;
    FSDataInputStream fis = null;
    try {
        // Sync file data and length to avoid partial read failure
        fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    } catch (ClosedChannelException closedException) {
        LOG.info("The FSDataOutputStream has been closed. " + "Continue processing the perfect overwrite.");
    } catch (IOException e) {
        LOG.info("hsync failed when processing possible perfect overwrite, path=" + path + " error:" + e);
        return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    }
    try {
        fis = dfsClient.createWrappedInputStream(dfsClient.open(path));
        readCount = fis.read(offset, readbuffer, 0, count);
        if (readCount < count) {
            LOG.error("Can't read back " + count + " bytes, partial read size:" + readCount);
            return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        }
    } catch (IOException e) {
        LOG.info("Read failed when processing possible perfect overwrite, path=" + path, e);
        return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    } finally {
        IOUtils.cleanup(LOG, fis);
    }
    // Compare with the request
    Comparator comparator = new Comparator();
    if (comparator.compare(readbuffer, 0, readCount, data, 0, count) != 0) {
        LOG.info("Perfect overwrite has different content");
        response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    } else {
        LOG.info("Perfect overwrite has same content," + " updating the mtime, then return success");
        Nfs3FileAttributes postOpAttr = null;
        try {
            dfsClient.setTimes(path, Time.monotonicNow(), -1);
            postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
        } catch (IOException e) {
            LOG.info("Got error when processing perfect overwrite, path=" + path + " error:" + e);
            return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        }
        wccData.setPostOpAttr(postOpAttr);
        response = new WRITE3Response(Nfs3Status.NFS3_OK, wccData, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    }
    return response;
}
19
Source : TestWebHdfsTimeouts.java
with Apache License 2.0
from NJUJYB
@After
public void tearDown() throws Exception {
    IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()]));
    IOUtils.cleanup(LOG, fs);
    if (serverSocket != null) {
        try {
            serverSocket.close();
        } catch (IOException e) {
            LOG.debug("Exception in closing " + serverSocket, e);
        }
    }
    if (serverThread != null) {
        serverThread.join();
    }
}
19
Source : TestOfflineImageViewer.java
with Apache License 2.0
from NJUJYB
private void copyPartOfFile(File src, File dest) throws IOException {
    FileInputStream in = null;
    FileOutputStream out = null;
    final int MAX_BYTES = 700;
    try {
        in = new FileInputStream(src);
        out = new FileOutputStream(dest);
        in.getChannel().transferTo(0, MAX_BYTES, out.getChannel());
    } finally {
        IOUtils.cleanup(null, in);
        IOUtils.cleanup(null, out);
    }
}
19
Source : TestRollingUpgrade.java
with Apache License 2.0
from NJUJYB
/**
 * In non-HA setup, after rolling upgrade prepare, the Secondary NN should
 * still be able to do checkpoint
 */
@Test
public void testCheckpointWithSNN() throws Exception {
    MiniDFSCluster cluster = null;
    DistributedFileSystem dfs = null;
    SecondaryNameNode snn = null;
    try {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
        snn = new SecondaryNameNode(conf);
        dfs = cluster.getFileSystem();
        dfs.mkdirs(new Path("/test/foo"));
        snn.doCheckpoint();
        // start rolling upgrade
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        dfs.mkdirs(new Path("/test/bar"));
        // do checkpoint in SNN again
        snn.doCheckpoint();
    } finally {
        IOUtils.cleanup(null, dfs);
        if (snn != null) {
            snn.shutdown();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
19
Source : TestDFSClientRetries.java
with Apache License 2.0
from NJUJYB
/**
 * Test that getAdditionalBlock() and close() are idempotent. This allows
 * a client to safely retry a call and still produce a correct
 * file. See HDFS-3031.
 */
@Test
public void testIdempotentAllocateBlockAndClose() throws Exception {
    final String src = "/testIdempotentAllocateBlock";
    Path file = new Path(src);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
        NamenodeProtocols spyNN = spy(preSpyNN);
        DFSClient client = new DFSClient(null, spyNN, conf, null);
        // Make the call to addBlock() get called twice, as if it were retried
        // due to an IPC issue.
        doAnswer(new Answer<LocatedBlock>() {
            @Override
            public LocatedBlock answer(InvocationOnMock invocation) throws Throwable {
                LocatedBlock ret = (LocatedBlock) invocation.callRealMethod();
                LocatedBlocks lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
                int blockCount = lb.getLocatedBlocks().size();
                assertEquals(lb.getLastLocatedBlock().getBlock(), ret.getBlock());
                // Retrying should result in a new block at the end of the file.
                // (abandoning the old one)
                LocatedBlock ret2 = (LocatedBlock) invocation.callRealMethod();
                lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
                int blockCount2 = lb.getLocatedBlocks().size();
                assertEquals(lb.getLastLocatedBlock().getBlock(), ret2.getBlock());
                // We shouldn't have gained an extra block by the RPC.
                assertEquals(blockCount, blockCount2);
                return ret2;
            }
        }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any(), Mockito.anyLong(), Mockito.<String[]>any());
        doAnswer(new Answer<Boolean>() {
            @Override
            public Boolean answer(InvocationOnMock invocation) throws Throwable {
                // complete() may return false a few times before it returns
                // true. We want to wait until it returns true, and then
                // make it retry one more time after that.
                LOG.info("Called complete(: " + Joiner.on(",").join(invocation.getArguments()) + ")");
                if (!(Boolean) invocation.callRealMethod()) {
                    LOG.info("Complete call returned false, not faking a retry RPC");
                    return false;
                }
                // We got a successful close. Call it again to check idempotence.
                try {
                    boolean ret = (Boolean) invocation.callRealMethod();
                    LOG.info("Complete call returned true, faked second RPC. " + "Returned: " + ret);
                    return ret;
                } catch (Throwable t) {
                    LOG.error("Idempotent retry threw exception", t);
                    throw t;
                }
            }
        }).when(spyNN).complete(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), anyLong());
        OutputStream stm = client.create(file.toString(), true);
        try {
            AppendTestUtil.write(stm, 0, 10000);
            stm.close();
            stm = null;
        } finally {
            IOUtils.cleanup(LOG, stm);
        }
        // Make sure the mock was actually properly injected.
        Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any(), Mockito.anyLong(), Mockito.<String[]>any());
        Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), anyLong());
        AppendTestUtil.check(fs, file, 10000);
    } finally {
        cluster.shutdown();
    }
}
19
Source : TestXAttrConfigFlag.java
with Apache License 2.0
from NJUJYB
@After
public void shutdown() throws Exception {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
        cluster.shutdown();
    }
}
19
Source : TestSaveNamespace.java
with Apache License 2.0
from NJUJYB
/**
 * Test that save namespace succeeds when a parent directory is renamed with
 * an open lease and the destination directory exists.
 * This is a regression test for HDFS-2827.
 */
@Test
public void testSaveNamespaceWithRenamedLease() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
    OutputStream out = null;
    try {
        fs.mkdirs(new Path("/test-target"));
        // don't close
        out = fs.create(new Path("/test-source/foo"));
        fs.rename(new Path("/test-source/"), new Path("/test-target/"));
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        cluster.getNameNodeRpc().saveNamespace();
        fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    } finally {
        IOUtils.cleanup(LOG, out, fs);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
19
Source : TestMetaSave.java
with Apache License 2.0
from NJUJYB
/**
 * Tests that metasave overwrites the output file (not append).
 */
@Test
public void testMetaSaveOverwrite() throws Exception {
    // metaSave twice.
    namesystem.metaSave("metaSaveOverwrite.out.txt");
    namesystem.metaSave("metaSaveOverwrite.out.txt");
    // Read output file.
    FileInputStream fis = null;
    InputStreamReader isr = null;
    BufferedReader rdr = null;
    try {
        fis = new FileInputStream(getLogFile("metaSaveOverwrite.out.txt"));
        isr = new InputStreamReader(fis);
        rdr = new BufferedReader(isr);
        // Validate that file was overwritten (not appended) by checking for
        // presence of only one "Live Datanodes" line.
        boolean foundLiveDatanodesLine = false;
        String line = rdr.readLine();
        while (line != null) {
            if (line.startsWith("Live Datanodes")) {
                if (foundLiveDatanodesLine) {
                    fail("multiple Live Datanodes lines, output file not overwritten");
                }
                foundLiveDatanodesLine = true;
            }
            line = rdr.readLine();
        }
    } finally {
        IOUtils.cleanup(null, rdr, isr, fis);
    }
}
19
Source : TestFileJournalManager.java
with Apache License 2.0
from NJUJYB
/**
 * Tests that internal renames are done using native code on platforms that
 * have it. The native rename includes more detailed information about the
 * failure, which can be useful for troubleshooting.
 */
@Test
public void testDoPreUpgradeIOError() throws IOException {
    File storageDir = new File(TestEditLog.TEST_DIR, "preupgradeioerror");
    List<URI> editUris = Collections.singletonList(storageDir.toURI());
    NNStorage storage = setupEdits(editUris, 5);
    StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
    assertNotNull(sd);
    // Change storage directory so that renaming current to previous.tmp fails.
    FileUtil.setWritable(storageDir, false);
    FileJournalManager jm = null;
    try {
        jm = new FileJournalManager(conf, sd, storage);
        exception.expect(IOException.class);
        if (NativeCodeLoader.isNativeCodeLoaded()) {
            exception.expectMessage("failure in native rename");
        }
        jm.doPreUpgrade();
    } finally {
        IOUtils.cleanup(LOG, jm);
        // Restore permissions on storage directory and make sure we can delete.
        FileUtil.setWritable(storageDir, true);
        FileUtil.fullyDelete(storageDir);
    }
}
19
Source : TestFileJournalManager.java
with Apache License 2.0
from NJUJYB
/**
 * Make sure that in-progress streams aren't counted if we don't ask for
 * them.
 */
@Test
public void testExcludeInProgressStreams() throws CorruptionException, IOException {
    File f = new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
    // Don't close the edit log once the files have been set up.
    NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10, false);
    StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
    // If we exclude the in-progress stream, we should only have 100 tx.
    assertEquals(100, getNumberOfTransactions(jm, 1, false, false));
    EditLogInputStream elis = getJournalInputStream(jm, 90, false);
    try {
        FSEditLogOp lastReadOp = null;
        while ((lastReadOp = elis.readOp()) != null) {
            assertTrue(lastReadOp.getTransactionId() <= 100);
        }
    } finally {
        IOUtils.cleanup(LOG, elis);
    }
}
19
Source : TestFileJournalManager.java
with Apache License 2.0
from NJUJYB
/**
 * Make sure that we start reading the correct op when we request a stream
 * with a txid in the middle of an edit log file.
 */
@Test
public void testReadFromMiddleOfEditLog() throws CorruptionException, IOException {
    File f = new File(TestEditLog.TEST_DIR + "/readfrommiddleofeditlog");
    NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10);
    StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
    EditLogInputStream elis = getJournalInputStream(jm, 5, true);
    try {
        FSEditLogOp op = elis.readOp();
        assertEquals("read unexpected op", op.getTransactionId(), 5);
    } finally {
        IOUtils.cleanup(LOG, elis);
    }
}
19
Source : TestFileJournalManager.java
with Apache License 2.0
from NJUJYB
private static EditLogInputStream getJournalInputStream(JournalManager jm, long txId, boolean inProgressOk) throws IOException {
    final PriorityQueue<EditLogInputStream> allStreams = new PriorityQueue<EditLogInputStream>(64, JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
    jm.selectInputStreams(allStreams, txId, inProgressOk);
    EditLogInputStream elis = null, ret;
    try {
        while ((elis = allStreams.poll()) != null) {
            if (elis.getFirstTxId() > txId) {
                break;
            }
            if (elis.getLastTxId() < txId) {
                elis.close();
                continue;
            }
            elis.skipUntil(txId);
            ret = elis;
            elis = null;
            return ret;
        }
    } finally {
        IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
        IOUtils.cleanup(LOG, elis);
    }
    return null;
}
19
Source : TestFileJournalManager.java
with Apache License 2.0
from NJUJYB
/**
 * Find out how many transactions we can read from a
 * FileJournalManager, starting at a given transaction ID.
 *
 * @param jm The journal manager
 * @param fromTxId Transaction ID to start at
 * @param inProgressOk Should we consider edit logs that are not finalized?
 * @return The number of transactions
 * @throws IOException
 */
static long getNumberOfTransactions(FileJournalManager jm, long fromTxId, boolean inProgressOk, boolean abortOnGap) throws IOException {
    long numTransactions = 0, txId = fromTxId;
    final PriorityQueue<EditLogInputStream> allStreams = new PriorityQueue<EditLogInputStream>(64, JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
    jm.selectInputStreams(allStreams, fromTxId, inProgressOk);
    EditLogInputStream elis = null;
    try {
        while ((elis = allStreams.poll()) != null) {
            try {
                elis.skipUntil(txId);
                while (true) {
                    FSEditLogOp op = elis.readOp();
                    if (op == null) {
                        break;
                    }
                    if (abortOnGap && (op.getTransactionId() != txId)) {
                        LOG.info("getNumberOfTransactions: detected gap at txId " + fromTxId);
                        return numTransactions;
                    }
                    txId = op.getTransactionId() + 1;
                    numTransactions++;
                }
            } finally {
                IOUtils.cleanup(LOG, elis);
            }
        }
    } finally {
        IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
    }
    return numTransactions;
}
19
Source : TestEditLogFileOutputStream.java
with Apache License 2.0
from NJUJYB
/**
 * Tests that EditLogFileOutputStream doesn't throw NullPointerException on
 * an abort/abort sequence. See HDFS-2011.
 */
@Test
public void testEditLogFileOutputStreamAbortAbort() throws IOException {
    // abort after a close should just ignore
    EditLogFileOutputStream editLogStream = null;
    try {
        editLogStream = new EditLogFileOutputStream(conf, TEST_EDITS, 0);
        editLogStream.abort();
        editLogStream.abort();
    } finally {
        IOUtils.cleanup(null, editLogStream);
    }
}
19
Source : TestHAStateTransitions.java
with Apache License 2.0
from NJUJYB
private static void testFailoverAfterCrashDuringLogRoll(boolean writeHeader) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, Integer.MAX_VALUE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    try {
        cluster.transitionToActive(0);
        NameNode nn0 = cluster.getNameNode(0);
        nn0.getRpcServer().rollEditLog();
        cluster.shutdownNameNode(0);
        createEmptyInProgressEditLog(cluster, nn0, writeHeader);
        cluster.transitionToActive(1);
    } finally {
        IOUtils.cleanup(LOG, fs);
        cluster.shutdown();
    }
}
19
Source : FSXAttrBaseTest.java
with Apache License 2.0
from NJUJYB
@After
public void destroyFileSystems() {
    IOUtils.cleanup(null, fs);
    fs = null;
}
19
Source : FSImageTestUtil.java
with Apache License 2.0
from NJUJYB
/**
 * Corrupt the given VERSION file by replacing a given
 * key with a new value and re-writing the file.
 *
 * @param versionFile the VERSION file to corrupt
 * @param key the key to replace
 * @param value the new value for this key
 */
public static void corruptVersionFile(File versionFile, String key, String value) throws IOException {
    Properties props = new Properties();
    FileInputStream fis = new FileInputStream(versionFile);
    FileOutputStream out = null;
    try {
        props.load(fis);
        IOUtils.closeStream(fis);
        if (value == null || value.isEmpty()) {
            props.remove(key);
        } else {
            props.setProperty(key, value);
        }
        out = new FileOutputStream(versionFile);
        props.store(out, null);
    } finally {
        IOUtils.cleanup(null, fis, out);
    }
}
19
Source : TestStorageMover.java
with Apache License 2.0
from NJUJYB
/**
 * Move an open file into archival storage
 */
@Test
public void testMigrateOpenFileToArchival() throws Exception {
    LOG.info("testMigrateOpenFileToArchival");
    final Path fooDir = new Path("/foo");
    Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
    policyMap.put(fooDir, COLD);
    NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(fooDir), null, BLOCK_SIZE, null, policyMap);
    ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF, NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
    MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
    test.setupCluster();
    // create an open file
    banner("writing to file /foo/bar");
    final Path barFile = new Path(fooDir, "bar");
    DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
    FSDataOutputStream out = test.dfs.append(barFile);
    out.writeBytes("hello, ");
    ((DFSOutputStream) out.getWrappedStream()).hsync();
    try {
        banner("start data migration");
        // set /foo to COLD
        test.setStoragePolicy();
        test.migrate();
        // make sure the under construction block has not been migrated
        LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(barFile.toString(), BLOCK_SIZE);
        LOG.info("Locations: " + lbs);
        List<LocatedBlock> blks = lbs.getLocatedBlocks();
        Assert.assertEquals(1, blks.size());
        Assert.assertEquals(1, blks.get(0).getLocations().length);
        banner("finish the migration, continue writing");
        // make sure the writing can continue
        out.writeBytes("world!");
        ((DFSOutputStream) out.getWrappedStream()).hsync();
        IOUtils.cleanup(LOG, out);
        lbs = test.dfs.getClient().getLocatedBlocks(barFile.toString(), BLOCK_SIZE);
        LOG.info("Locations: " + lbs);
        blks = lbs.getLocatedBlocks();
        Assert.assertEquals(1, blks.size());
        Assert.assertEquals(1, blks.get(0).getLocations().length);
        banner("finish writing, starting reading");
        // check the content of /foo/bar
        FSDataInputStream in = test.dfs.open(barFile);
        byte[] buf = new byte[13];
        // read from offset 1024
        in.readFully(BLOCK_SIZE, buf, 0, buf.length);
        IOUtils.cleanup(LOG, in);
        Assert.assertEquals("hello, world!", new String(buf));
    } finally {
        test.shutdownCluster();
    }
}
19
Source : TestFsDatasetCache.java
with Apache License 2.0
from NJUJYB
private static long[] getBlockSizes(HdfsBlockLocation[] locs) throws Exception {
    long[] sizes = new long[locs.length];
    for (int i = 0; i < locs.length; i++) {
        HdfsBlockLocation loc = locs[i];
        String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
        Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
        ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
        FileInputStream blockInputStream = null;
        FileChannel blockChannel = null;
        try {
            blockInputStream = (FileInputStream) fsd.getBlockInputStream(extBlock, 0);
            blockChannel = blockInputStream.getChannel();
            sizes[i] = blockChannel.size();
        } finally {
            IOUtils.cleanup(LOG, blockChannel, blockInputStream);
        }
    }
    return sizes;
}
19
Source : TestDirectoryScanner.java
with Apache License 2.0
from NJUJYB
/**
 * Truncate a block file
 */
private long truncateBlockFile() throws IOException {
    synchronized (fds) {
        for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
            File f = b.getBlockFile();
            File mf = b.getMetaFile();
            // Truncate a block file that has a corresponding metadata file
            if (f.exists() && f.length() != 0 && mf.exists()) {
                FileOutputStream s = null;
                FileChannel channel = null;
                try {
                    s = new FileOutputStream(f);
                    channel = s.getChannel();
                    channel.truncate(0);
                    LOG.info("Truncated block file " + f.getAbsolutePath());
                    return b.getBlockId();
                } finally {
                    IOUtils.cleanup(LOG, channel, s);
                }
            }
        }
    }
    return 0;
}
19
Source : TestBlockReplacement.java
with Apache License 2.0
from NJUJYB
/**
 * Standby namenode doesn't queue a Delete block request when the
 * corresponding add block request is in edit log entries that are yet to
 * be read.
 * @throws Exception
 */
@Test
public void testDeletedBlockWhenAddBlockIsInEdit() throws Exception {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
    DFSClient client = null;
    try {
        cluster.waitActive();
        assertEquals("Number of namenodes is not 2", 2, cluster.getNumNameNodes());
        // Transitioning the namenode 0 to active.
        cluster.transitionToActive(0);
        assertTrue("Namenode 0 should be in active state", cluster.getNameNode(0).isActiveState());
        assertTrue("Namenode 1 should be in standby state", cluster.getNameNode(1).isStandbyState());
        // Trigger heartbeat to mark DatanodeStorageInfo#heartbeatedSinceFailover
        // to true.
        DataNodeTestUtils.triggerHeartbeat(cluster.getDataNodes().get(0));
        FileSystem fs = cluster.getFileSystem(0);
        // Trigger blockReport to mark DatanodeStorageInfo#blockContentsStale
        // to false.
        cluster.getDataNodes().get(0).triggerBlockReport(new BlockReportOptions.Factory().setIncremental(false).build());
        Path fileName = new Path("/tmp.txt");
        // create a file with one block
        DFSTestUtil.createFile(fs, fileName, 10L, (short) 1, 1234L);
        DFSTestUtil.waitReplication(fs, fileName, (short) 1);
        client = new DFSClient(cluster.getFileSystem(0).getUri(), conf);
        List<LocatedBlock> locatedBlocks = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
        assertTrue(locatedBlocks.size() == 1);
        assertTrue(locatedBlocks.get(0).getLocations().length == 1);
        // add a second datanode to the cluster
        cluster.startDataNodes(conf, 1, true, null, null, null, null);
        assertEquals("Number of datanodes should be 2", 2, cluster.getDataNodes().size());
        DataNode dn0 = cluster.getDataNodes().get(0);
        DataNode dn1 = cluster.getDataNodes().get(1);
        String activeNNBPId = cluster.getNamesystem(0).getBlockPoolId();
        DatanodeDescriptor sourceDnDesc = NameNodeAdapter.getDatanode(cluster.getNamesystem(0), dn0.getDNRegistrationForBP(activeNNBPId));
        DatanodeDescriptor destDnDesc = NameNodeAdapter.getDatanode(cluster.getNamesystem(0), dn1.getDNRegistrationForBP(activeNNBPId));
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
        LOG.info("replaceBlock: " + replaceBlock(block, (DatanodeInfo) sourceDnDesc, (DatanodeInfo) sourceDnDesc, (DatanodeInfo) destDnDesc));
        // Waiting for the FsDatasetAsyncDiskService to delete the block
        Thread.sleep(3000);
        // Triggering the incremental block report to report the deleted block
        // to the namenode
        cluster.getDataNodes().get(0).triggerBlockReport(new BlockReportOptions.Factory().setIncremental(true).build());
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        assertTrue("Namenode 1 should be in active state", cluster.getNameNode(1).isActiveState());
        assertTrue("Namenode 0 should be in standby state", cluster.getNameNode(0).isStandbyState());
        client.close();
        // Opening a new client for new active namenode
        client = new DFSClient(cluster.getFileSystem(1).getUri(), conf);
        List<LocatedBlock> locatedBlocks1 = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
        assertEquals(1, locatedBlocks1.size());
        assertEquals("The block should be only on 1 datanode ", 1, locatedBlocks1.get(0).getLocations().length);
    } finally {
        IOUtils.cleanup(null, client);
        cluster.shutdown();
    }
}
19
Source : TestRBWBlockInvalidation.java
with Apache License 2.0
from NJUJYB
/**
 * Regression test for HDFS-4799, a case where, upon restart, if there
 * were RWR replicas with out-of-date genstamps, the NN could accidentally
 * delete good replicas instead of the bad replicas.
 */
@Test(timeout = 60000)
public void testRWRInvalidation() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Set the deletion policy to be randomized rather than the default.
    // The default is based on disk space, which isn't controllable
    // in the context of the test, whereas a random one is more accurate
    // to what is seen in real clusters (nodes have random amounts of free
    // space)
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, RandomDeleterPolicy.class, BlockPlacementPolicy.class);
    // Speed up the test a bit with faster heartbeats.
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    // Test with a bunch of separate files, since otherwise the test may
    // fail just due to "good luck", even if a bug is present.
    List<Path> testPaths = Lists.newArrayList();
    for (int i = 0; i < 10; i++) {
        testPaths.add(new Path("/test" + i));
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
        List<FSDataOutputStream> streams = Lists.newArrayList();
        try {
            // Open the test files and write some data to each
            for (Path path : testPaths) {
                FSDataOutputStream out = cluster.getFileSystem().create(path, (short) 2);
                streams.add(out);
                out.writeBytes("old gs data\n");
                out.hflush();
            }
            // Shutdown one of the nodes in the pipeline
            DataNodeProperties oldGenstampNode = cluster.stopDataNode(0);
            // Write some more data and flush again. This data will only
            // be in the latter genstamp copy of the blocks.
            for (int i = 0; i < streams.size(); i++) {
                Path path = testPaths.get(i);
                FSDataOutputStream out = streams.get(i);
                out.writeBytes("new gs data\n");
                out.hflush();
                // Set replication so that only one node is necessary for this block,
                // and close it.
                cluster.getFileSystem().setReplication(path, (short) 1);
                out.close();
            }
            // Upon restart, there will be two replicas, one with an old genstamp
            // and one current copy. This test wants to ensure that the old genstamp
            // copy is the one that is deleted.
            LOG.info("=========================== restarting cluster");
            DataNodeProperties otherNode = cluster.stopDataNode(0);
            cluster.restartNameNode();
            // Restart the datanode with the corrupt replica first.
            cluster.restartDataNode(oldGenstampNode);
            cluster.waitActive();
            // Then the other node
            cluster.restartDataNode(otherNode);
            cluster.waitActive();
            // Compute and send invalidations, waiting until they're fully processed.
            cluster.getNameNode().getNamesystem().getBlockManager().computeInvalidateWork(2);
            cluster.triggerHeartbeats();
            HATestUtil.waitForDNDeletions(cluster);
            cluster.triggerDeletionReports();
            // Make sure we can still read the blocks.
            for (Path path : testPaths) {
                String ret = DFSTestUtil.readFile(cluster.getFileSystem(), path);
                assertEquals("old gs data\n" + "new gs data\n", ret);
            }
        } finally {
            IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
        }
    } finally {
        cluster.shutdown();
    }
}
19
Source : TestBalancer.java
with Apache License 2.0
from NJUJYB
private static int runBalancer(Collection<URI> namenodes, final Parameters p, Configuration conf) throws IOException, InterruptedException {
    final long sleeptime = conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000 + conf.getLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
    LOG.info("namenodes = " + namenodes);
    LOG.info("parameters = " + p);
    LOG.info("Print stack trace", new Throwable());
    System.out.println("Time Stamp Iteration# Bytes Already Moved Bytes Left To Move Bytes Being Moved");
    List<NameNodeConnector> connectors = Collections.emptyList();
    try {
        connectors = NameNodeConnector.newNameNodeConnectors(namenodes, Balancer.class.getSimpleName(), Balancer.BALANCER_ID_PATH, conf);
        boolean done = false;
        for (int iteration = 0; !done; iteration++) {
            done = true;
            Collections.shuffle(connectors);
            for (NameNodeConnector nnc : connectors) {
                final Balancer b = new Balancer(nnc, p, conf);
                final Result r = b.runOneIteration();
                r.print(iteration, System.out);
                // clean all lists
                b.resetData(conf);
                if (r.exitStatus == ExitStatus.IN_PROGRESS) {
                    done = false;
                } else if (r.exitStatus != ExitStatus.SUCCESS) {
                    // must be an error status, return.
                    return r.exitStatus.getExitCode();
                } else {
                    if (iteration > 0) {
                        assertTrue(r.bytesAlreadyMoved > 0);
                    }
                }
            }
            if (!done) {
                Thread.sleep(sleeptime);
            }
        }
    } finally {
        for (NameNodeConnector nnc : connectors) {
            IOUtils.cleanup(LOG, nnc);
        }
    }
    return ExitStatus.SUCCESS.getExitCode();
}
19
Source : TestSecureNNWithQJM.java
with Apache License 2.0
from NJUJYB
/**
 * Restarts the NameNode and obtains a new FileSystem.
 *
 * @throws IOException if there is an I/O error
 */
private void restartNameNode() throws IOException {
    IOUtils.cleanup(null, fs);
    cluster.restartNameNode();
    fs = cluster.getFileSystem();
}
19
Source : TestSecureNNWithQJM.java
with Apache License 2.0
from NJUJYB
@After
public void shutdown() throws IOException {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
        cluster.shutdown();
    }
    if (mjc != null) {
        mjc.shutdown();
    }
}
19
Source : QJMTestUtil.java
with Apache License 2.0
from NJUJYB
public static long recoverAndReturnLastTxn(QuorumJournalManager qjm) throws IOException {
    qjm.recoverUnfinalizedSegments();
    long lastRecoveredTxn = 0;
    List<EditLogInputStream> streams = Lists.newArrayList();
    try {
        qjm.selectInputStreams(streams, 0, false);
        for (EditLogInputStream elis : streams) {
            assertTrue(elis.getFirstTxId() > lastRecoveredTxn);
            lastRecoveredTxn = elis.getLastTxId();
        }
    } finally {
        IOUtils.cleanup(null, streams.toArray(new Closeable[0]));
    }
    return lastRecoveredTxn;
}