Here are examples of the Java API org.apache.hadoop.fs.FSDataOutputStream, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
809 Examples
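Before the project examples, here is a minimal sketch of the usual FSDataOutputStream write path: obtain a FileSystem, create the file, write, flush to the datanodes, and close. The configuration and the path "/tmp/fsdos-example.txt" are illustrative placeholders, not taken from any of the projects below.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsDataOutputStreamSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Uses the default file system from the configuration
        // (local fs unless fs.defaultFS points at an HDFS cluster).
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/fsdos-example.txt");
        // create(path, true) overwrites an existing file; try-with-resources closes the stream.
        try (FSDataOutputStream out = fs.create(path, true)) {
            out.writeBytes("hello fsdataoutputstream\n"); // DataOutputStream convenience method
            out.hflush(); // flush client-side buffers so the data is visible to readers
        }
    }
}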
Source : SpoolingRawBatchBuffer.java
with Apache License 2.0
from zpochen
/**
* This implementation of RawBatchBuffer starts writing incoming batches to disk once the buffer size reaches a threshold.
* The order of the incoming buffers is maintained.
*/
public class SpoolingRawBatchBuffer extends BaseRawBatchBuffer<SpoolingRawBatchBuffer.RawFragmentBatchWrapper> {
static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SpoolingRawBatchBuffer.class);
private static String DRILL_LOCAL_IMPL_STRING = "fs.drill-local.impl";
private static final float STOP_SPOOLING_FRACTION = (float) 0.5;
public static final long ALLOCATOR_INITIAL_RESERVATION = 1 * 1024 * 1024;
public static final long ALLOCATOR_MAX_RESERVATION = 20L * 1000 * 1000 * 1000;
private enum SpoolingState {
NOT_SPOOLING, SPOOLING, PAUSE_SPOOLING, STOP_SPOOLING
}
private final BufferAllocator allocator;
private final long threshold;
private final int oppositeId;
private final int bufferIndex;
private volatile SpoolingState spoolingState;
private volatile long currentSizeInMemory = 0;
private volatile Spooler spooler;
private FileSystem fs;
private Path path;
private FSDataOutputStream outputStream;
public SpoolingRawBatchBuffer(FragmentContext context, int fragmentCount, int oppositeId, int bufferIndex) {
super(context, fragmentCount);
this.allocator = context.getNewChildAllocator("SpoolingRawBatchBufer", 100, ALLOCATOR_INITIAL_RESERVATION, ALLOCATOR_MAX_RESERVATION);
this.threshold = context.getConfig().getLong(ExecConstants.SPOOLING_BUFFER_MEMORY);
this.oppositeId = oppositeId;
this.bufferIndex = bufferIndex;
this.bufferQueue = new SpoolingBufferQueue();
}
private class SpoolingBufferQueue implements BufferQueue<RawFragmentBatchWrapper> {
private final LinkedBlockingDeque<RawFragmentBatchWrapper> buffer = Queues.newLinkedBlockingDeque();
@Override
public void addOomBatch(RawFragmentBatch batch) {
RawFragmentBatchWrapper batchWrapper = new RawFragmentBatchWrapper(batch, true);
batchWrapper.setOutOfMemory(true);
buffer.addFirst(batchWrapper);
}
@Override
public RawFragmentBatch poll() throws IOException {
RawFragmentBatchWrapper batchWrapper = buffer.poll();
if (batchWrapper != null) {
try {
return batchWrapper.get();
} catch (InterruptedException e) {
return null;
}
}
return null;
}
@Override
public RawFragmentBatch take() throws IOException, InterruptedException {
return buffer.take().get();
}
@Override
public boolean checkForOutOfMemory() {
return buffer.peek().isOutOfMemory();
}
@Override
public int size() {
return buffer.size();
}
@Override
public boolean isEmpty() {
return buffer.size() == 0;
}
public void add(RawFragmentBatchWrapper batchWrapper) {
buffer.add(batchWrapper);
}
}
private synchronized void setSpoolingState(SpoolingState newState) {
SpoolingState currentState = spoolingState;
if (newState == SpoolingState.NOT_SPOOLING || currentState == SpoolingState.STOP_SPOOLING) {
return;
}
spoolingState = newState;
}
private boolean isCurrentlySpooling() {
return spoolingState == SpoolingState.SPOOLING;
}
private void startSpooling() {
setSpoolingState(SpoolingState.SPOOLING);
}
private void pauseSpooling() {
setSpoolingState(SpoolingState.PAUSE_SPOOLING);
}
private boolean isSpoolingStopped() {
return spoolingState == SpoolingState.STOP_SPOOLING;
}
private void stopSpooling() {
setSpoolingState(SpoolingState.STOP_SPOOLING);
}
public String getDir() {
List<String> dirs = context.getConfig().getStringList(ExecConstants.TEMP_DIRECTORIES);
return dirs.get(ThreadLocalRandom.current().nextInt(dirs.size()));
}
private synchronized void initSpooler() throws IOException {
if (spooler != null) {
return;
}
Configuration conf = new Configuration();
conf.set(FileSystem.FS_DEFAULT_NAME_KEY, context.getConfig().getString(ExecConstants.TEMP_FILESYSTEM));
conf.set(DRILL_LOCAL_IMPL_STRING, LocalSyncableFileSystem.class.getName());
fs = FileSystem.get(conf);
path = getPath();
outputStream = fs.create(path);
final String spoolingThreadName = QueryIdHelper.getExecutorThreadName(context.getHandle()).concat(":Spooler-" + oppositeId + "-" + bufferIndex);
spooler = new Spooler(spoolingThreadName);
spooler.start();
}
@Override
protected void enqueueInner(RawFragmentBatch batch) throws IOException {
assert batch.getHeader().getSendingMajorFragmentId() == oppositeId;
logger.debug("Enqueue batch. Current buffer size: {}. Last batch: {}. Sending fragment: {}", bufferQueue.size(), batch.getHeader().getIsLastBatch(), batch.getHeader().getSendingMajorFragmentId());
RawFragmentBatchWrapper wrapper;
boolean spoolCurrentBatch = isCurrentlySpooling();
wrapper = new RawFragmentBatchWrapper(batch, !spoolCurrentBatch);
currentSizeInMemory += wrapper.getBodySize();
if (spoolCurrentBatch) {
if (spooler == null) {
initSpooler();
}
spooler.addBatchForSpooling(wrapper);
}
bufferQueue.add(wrapper);
if (!spoolCurrentBatch && currentSizeInMemory > threshold) {
logger.debug("Buffer size {} greater than threshold {}. Start spooling to disk", currentSizeInMemory, threshold);
startSpooling();
}
}
@Override
public void kill(FragmentContext context) {
allocator.close();
if (spooler != null) {
spooler.terminate();
}
}
@Override
protected void upkeep(RawFragmentBatch batch) {
if (context.isOverMemoryLimit()) {
outOfMemory.set(true);
}
DrillBuf body = batch.getBody();
if (body != null) {
currentSizeInMemory -= body.capacity();
}
if (isCurrentlySpooling() && currentSizeInMemory < threshold * STOP_SPOOLING_FRACTION) {
logger.debug("buffer size {} less than {}x threshold. Stop spooling.", currentSizeInMemory, STOP_SPOOLING_FRACTION);
pauseSpooling();
}
logger.debug("Got batch. Current buffer size: {}", bufferQueue.size());
}
@Override
public void close() {
if (spooler != null) {
spooler.terminate();
while (spooler.isAlive()) {
try {
spooler.join();
} catch (InterruptedException e) {
logger.warn("Interrupted while waiting for spooling thread to exit");
continue;
}
}
}
allocator.close();
try {
if (outputStream != null) {
outputStream.close();
}
} catch (IOException e) {
logger.warn("Failed to cleanup I/O streams", e);
}
if (context.getConfig().getBoolean(ExecConstants.SPOOLING_BUFFER_DELETE)) {
try {
if (fs != null) {
fs.delete(path, false);
logger.debug("Deleted file {}", path.toString());
}
} catch (IOException e) {
logger.warn("Failed to delete temporary files", e);
}
}
super.close();
}
private class Spooler extends Thread {
private final LinkedBlockingDeque<RawFragmentBatchWrapper> spoolingQueue;
private volatile boolean shouldContinue = true;
private Thread spoolingThread;
public Spooler(String name) {
setDaemon(true);
setName(name);
spoolingQueue = Queues.newLinkedBlockingDeque();
}
public void run() {
try {
while (shouldContinue) {
RawFragmentBatchWrapper batch;
try {
batch = spoolingQueue.take();
} catch (InterruptedException e) {
if (shouldContinue) {
continue;
} else {
break;
}
}
try {
batch.writeToStream(outputStream);
} catch (IOException e) {
context.fail(e);
}
}
} catch (Throwable e) {
context.fail(e);
} finally {
logger.info("Spooler thread exiting");
}
}
public void addBatchForSpooling(RawFragmentBatchWrapper batchWrapper) {
if (isSpoolingStopped()) {
spoolingQueue.add(batchWrapper);
} else {
// will not spill this batch
batchWrapper.available = true;
batchWrapper.batch.sendOk();
batchWrapper.latch.countDown();
}
}
public void terminate() {
stopSpooling();
shouldContinue = false;
if (spoolingThread.isAlive()) {
spoolingThread.interrupt();
}
}
}
class RawFragmentBatchWrapper {
private RawFragmentBatch batch;
private volatile boolean available;
private CountDownLatch latch;
private volatile int bodyLength;
private volatile boolean outOfMemory = false;
private long start = -1;
private long check;
public RawFragmentBatchWrapper(RawFragmentBatch batch, boolean available) {
Preconditions.checkNotNull(batch);
this.batch = batch;
this.available = available;
this.latch = new CountDownLatch(available ? 0 : 1);
if (available) {
batch.sendOk();
}
}
public boolean isNull() {
return batch == null;
}
public RawFragmentBatch get() throws InterruptedException, IOException {
if (available) {
assert batch.getHeader() != null : "batch header null";
return batch;
} else {
latch.await();
readFromStream();
available = true;
return batch;
}
}
public long getBodySize() {
if (batch.getBody() == null) {
return 0;
}
assert batch.getBody().readableBytes() >= 0;
return batch.getBody().readableBytes();
}
public void writeToStream(FSDataOutputStream stream) throws IOException {
Stopwatch watch = Stopwatch.createStarted();
available = false;
check = ThreadLocalRandom.current().nextLong();
start = stream.getPos();
logger.debug("Writing check value {} at position {}", check, start);
stream.writeLong(check);
batch.getHeader().writeDelimitedTo(stream);
ByteBuf buf = batch.getBody();
if (buf != null) {
bodyLength = buf.capacity();
} else {
bodyLength = 0;
}
if (bodyLength > 0) {
buf.getBytes(0, stream, bodyLength);
}
stream.hsync();
FileStatus status = fs.getFileStatus(path);
long len = status.getLen();
logger.debug("After spooling batch, stream at position {}. File length {}", stream.getPos(), len);
batch.sendOk();
latch.countDown();
long t = watch.elapsed(TimeUnit.MICROSECONDS);
logger.debug("Took {} us to spool {} to disk. Rate {} mb/s", t, bodyLength, bodyLength / t);
if (buf != null) {
buf.release();
}
}
public void readFromStream() throws IOException, InterruptedException {
long pos = start;
boolean tryAgain = true;
int duration = 0;
while (tryAgain) {
// Sometimes, the file isn't quite done writing when we attempt to read it. As such, we need to wait and retry.
Thread.sleep(duration);
try (final FSDataInputStream stream = fs.open(path);
final DrillBuf buf = allocator.buffer(bodyLength)) {
stream.seek(start);
final long currentPos = stream.getPos();
final long check = stream.readLong();
pos = stream.getPos();
assert check == this.check : String.format("Check values don't match: %d %d, Position %d", this.check, check, currentPos);
Stopwatch watch = Stopwatch.createStarted();
BitData.FragmentRecordBatch header = BitData.FragmentRecordBatch.parseDelimitedFrom(stream);
pos = stream.getPos();
assert header != null : "header null after parsing from stream";
buf.writeBytes(stream, bodyLength);
pos = stream.getPos();
batch = new RawFragmentBatch(header, buf, null);
available = true;
latch.countDown();
long t = watch.elapsed(TimeUnit.MICROSECONDS);
logger.debug("Took {} us to read {} from disk. Rate {} mb/s", t, bodyLength, bodyLength / t);
tryAgain = false;
} catch (EOFException e) {
FileStatus status = fs.getFileStatus(path);
logger.warn("EOF reading from file {} at pos {}. Current file size: {}", path, pos, status.getLen());
duration = Math.max(1, duration * 2);
if (duration < 60000) {
continue;
} else {
throw e;
}
} finally {
if (tryAgain) {
// we had a premature exit, release batch memory so we don't leak it.
if (batch != null) {
batch.getBody().release();
}
}
}
}
}
private boolean isOutOfMemory() {
return outOfMemory;
}
private void setOutOfMemory(boolean outOfMemory) {
this.outOfMemory = outOfMemory;
}
}
private Path getPath() {
ExecProtos.FragmentHandle handle = context.getHandle();
String qid = QueryIdHelper.getQueryId(handle.getQueryId());
int majorFragmentId = handle.getMajorFragmentId();
int minorFragmentId = handle.getMinorFragmentId();
String fileName = Joiner.on(Path.SEPARATOR).join(getDir(), qid, majorFragmentId, minorFragmentId, oppositeId, bufferIndex);
return new Path(fileName);
}
}
Source : JsonRecordWriter.java
with Apache License 2.0
from zpochen
public class JsonRecordWriter extends JSONOutputRecordWriter implements RecordWriter {
private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(JsonRecordWriter.class);
private static final String LINE_FEED = String.format("%n");
private Path cleanUpLocation;
private String location;
private String prefix;
private String fieldDelimiter;
private String extension;
private boolean useExtendedOutput;
private int index;
private FileSystem fs = null;
private FSDataOutputStream stream = null;
private final JsonFactory factory = new JsonFactory();
private final StorageStrategy storageStrategy;
// Record write status
// true once the startRecord() is called until endRecord() is called
private boolean fRecordStarted = false;
public JsonRecordWriter(StorageStrategy storageStrategy) {
this.storageStrategy = storageStrategy == null ? StorageStrategy.DEFAULT : storageStrategy;
}
@Override
public void init(Map<String, String> writerOptions) throws IOException {
this.location = writerOptions.get("location");
this.prefix = writerOptions.get("prefix");
this.fieldDelimiter = writerOptions.get("separator");
this.extension = writerOptions.get("extension");
this.useExtendedOutput = Boolean.parseBoolean(writerOptions.get("extended"));
this.skipNullFields = Boolean.parseBoolean(writerOptions.get("skipnulls"));
final boolean uglify = Boolean.parseBoolean(writerOptions.get("uglify"));
Configuration conf = new Configuration();
conf.set(FileSystem.FS_DEFAULT_NAME_KEY, writerOptions.get(FileSystem.FS_DEFAULT_NAME_KEY));
this.fs = FileSystem.get(conf);
Path fileName = new Path(location, prefix + "_" + index + "." + extension);
try {
// json writer does not support partitions, so only one file can be created
// and thus only one location should be deleted in case of abort
// to ensure that our writer was the first to create output file,
// we create empty output file first and fail if file exists
cleanUpLocation = storageStrategy.createFileAndApply(fs, fileName);
// since empty output file will be overwritten (some file systems may restrict append option)
// we need to re-apply file permission
stream = fs.create(fileName);
storageStrategy.applyToFile(fs, fileName);
JsonGenerator generator = factory.createGenerator(stream).useDefaultPrettyPrinter();
if (uglify) {
generator = generator.setPrettyPrinter(new MinimalPrettyPrinter(LINE_FEED));
}
if (useExtendedOutput) {
gen = new ExtendedJsonOutput(generator);
} else {
gen = new BasicJsonOutput(generator);
}
logger.debug("Created file: {}", fileName);
} catch (IOException ex) {
logger.error("Unable to create file: " + fileName, ex);
throw ex;
}
}
@Override
public void updateSchema(VectorAccessible batch) throws IOException {
// no op
}
@Override
public FieldConverter getNewMapConverter(int fieldId, String fieldName, FieldReader reader) {
return new MapJsonConverter(fieldId, fieldName, reader);
}
public class MapJsonConverter extends FieldConverter {
List<FieldConverter> converters = Lists.newArrayList();
public MapJsonConverter(int fieldId, String fieldName, FieldReader reader) {
super(fieldId, fieldName, reader);
int i = 0;
for (String name : reader) {
FieldConverter converter = EventBasedRecordWriter.getConverter(JsonRecordWriter.this, i++, name, reader.reader(name));
converters.add(converter);
}
}
@Override
public void startField() throws IOException {
gen.writeFieldName(fieldName);
}
@Override
public void writeField() throws IOException {
gen.writeStartObject();
for (FieldConverter converter : converters) {
converter.startField();
converter.writeField();
}
gen.writeEndObject();
}
}
@Override
public FieldConverter getNewUnionConverter(int fieldId, String fieldName, FieldReader reader) {
return new UnionJsonConverter(fieldId, fieldName, reader);
}
public class UnionJsonConverter extends FieldConverter {
public UnionJsonConverter(int fieldId, String fieldName, FieldReader reader) {
super(fieldId, fieldName, reader);
}
@Override
public void startField() throws IOException {
gen.writeFieldName(fieldName);
}
@Override
public void writeField() throws IOException {
JsonWriter writer = new JsonWriter(gen);
writer.write(reader);
}
}
@Override
public FieldConverter getNewRepeatedMapConverter(int fieldId, String fieldName, FieldReader reader) {
return new RepeatedMapJsonConverter(fieldId, fieldName, reader);
}
public class RepeatedMapJsonConverter extends FieldConverter {
List<FieldConverter> converters = Lists.newArrayList();
public RepeatedMapJsonConverter(int fieldId, String fieldName, FieldReader reader) {
super(fieldId, fieldName, reader);
int i = 0;
for (String name : reader) {
FieldConverter converter = EventBasedRecordWriter.getConverter(JsonRecordWriter.this, i++, name, reader.reader(name));
converters.add(converter);
}
}
@Override
public void startField() throws IOException {
gen.writeFieldName(fieldName);
}
@Override
public void writeField() throws IOException {
gen.writeStartArray();
while (reader.next()) {
gen.writeStartObject();
for (FieldConverter converter : converters) {
converter.startField();
converter.writeField();
}
gen.writeEndObject();
}
gen.writeEndArray();
}
}
@Override
public FieldConverter getNewRepeatedListConverter(int fieldId, String fieldName, FieldReader reader) {
return new RepeatedListJsonConverter(fieldId, fieldName, reader);
}
public class RepeatedListJsonConverter extends FieldConverter {
FieldConverter converter;
public RepeatedListJsonConverter(int fieldId, String fieldName, FieldReader reader) {
super(fieldId, fieldName, reader);
converter = EventBasedRecordWriter.getConverter(JsonRecordWriter.this, fieldId, fieldName, reader.reader());
}
@Override
public void startField() throws IOException {
gen.writeFieldName(fieldName);
}
@Override
public void writeField() throws IOException {
gen.writeStartArray();
while (reader.next()) {
converter.writeField();
}
gen.writeEndArray();
}
}
@Override
public void startRecord() throws IOException {
gen.writeStartObject();
fRecordStarted = true;
}
@Override
public void endRecord() throws IOException {
gen.writeEndObject();
fRecordStarted = false;
}
@Override
public void abort() throws IOException {
if (cleanUpLocation != null) {
fs.delete(cleanUpLocation, true);
logger.info("Aborting writer. Location [{}] on file system [{}] is deleted.", cleanUpLocation.toUri().getPath(), fs.getUri());
}
}
@Override
public void cleanup() throws IOException {
gen.flush();
stream.close();
}
}
Source : BatchGroup.java
with Apache License 2.0
from zpochen
public class BatchGroup implements VectorAccessible, AutoCloseable {
static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(BatchGroup.class);
private VectorContainer currentContainer;
private SelectionVector2 sv2;
private int pointer = 0;
private FSDataInputStream inputStream;
private FSDataOutputStream outputStream;
private Path path;
private FileSystem fs;
private BufferAllocator allocator;
private int spilledBatches = 0;
private OperatorContext context;
private BatchSchema schema;
public BatchGroup(VectorContainer container, SelectionVector2 sv2, OperatorContext context) {
this.sv2 = sv2;
this.currentContainer = container;
this.context = context;
}
public BatchGroup(VectorContainer container, FileSystem fs, String path, OperatorContext context) {
currentContainer = container;
this.fs = fs;
this.path = new Path(path);
this.allocator = context.getAllocator();
this.context = context;
}
public SelectionVector2 getSv2() {
return sv2;
}
/**
* Updates the schema for this batch group. The current as well as any deserialized batches will be coerced to this schema
* @param schema
*/
public void setSchema(BatchSchema schema) {
currentContainer = SchemaUtil.coerceContainer(currentContainer, schema, context);
this.schema = schema;
}
public void addBatch(VectorContainer newContainer) throws IOException {
assert fs != null;
assert path != null;
if (outputStream == null) {
outputStream = fs.create(path);
}
int recordCount = newContainer.getRecordCount();
WritableBatch batch = WritableBatch.getBatchNoHVWrap(recordCount, newContainer, false);
VectorAccessibleSerializable outputBatch = new VectorAccessibleSerializable(batch, allocator);
Stopwatch watch = Stopwatch.createStarted();
outputBatch.writeToStream(outputStream);
newContainer.zeroVectors();
logger.debug("Took {} us to spill {} records", watch.elapsed(TimeUnit.MICROSECONDS), recordCount);
spilledBatches++;
}
private VectorContainer getBatch() throws IOException {
assert fs != null;
assert path != null;
if (inputStream == null) {
inputStream = fs.open(path);
}
VectorAccessibleSerializable vas = new VectorAccessibleSerializable(allocator);
Stopwatch watch = Stopwatch.createStarted();
vas.readFromStream(inputStream);
VectorContainer c = vas.get();
if (schema != null) {
c = SchemaUtil.coerceContainer(c, schema, context);
}
logger.trace("Took {} us to read {} records", watch.elapsed(TimeUnit.MICROSECONDS), c.getRecordCount());
spilledBatches--;
currentContainer.zeroVectors();
Iterator<VectorWrapper<?>> wrapperIterator = c.iterator();
for (VectorWrapper<?> w : currentContainer) {
TransferPair pair = wrapperIterator.next().getValueVector().makeTransferPair(w.getValueVector());
pair.transfer();
}
currentContainer.setRecordCount(c.getRecordCount());
c.zeroVectors();
return c;
}
public int getNextIndex() {
int val;
if (pointer == getRecordCount()) {
if (spilledBatches == 0) {
return -1;
}
try {
currentContainer.zeroVectors();
getBatch();
} catch (IOException e) {
throw new RuntimeException(e);
}
pointer = 1;
return 0;
}
if (sv2 == null) {
val = pointer;
pointer++;
assert val < currentContainer.getRecordCount();
} else {
val = pointer;
pointer++;
assert val < currentContainer.getRecordCount();
val = sv2.getIndex(val);
}
return val;
}
public VectorContainer getContainer() {
return currentContainer;
}
@Override
public void close() throws IOException {
currentContainer.zeroVectors();
if (sv2 != null) {
sv2.clear();
}
if (outputStream != null) {
outputStream.close();
}
if (inputStream != null) {
inputStream.close();
}
if (fs != null && fs.exists(path)) {
fs.delete(path, false);
}
}
public void closeOutputStream() throws IOException {
if (outputStream != null) {
outputStream.close();
}
}
@Override
public VectorWrapper<?> getValueAccessorById(Class<?> clazz, int... ids) {
return currentContainer.getValueAccessorById(clazz, ids);
}
@Override
public TypedFieldId getValueVectorId(SchemaPath path) {
return currentContainer.getValueVectorId(path);
}
@Override
public BatchSchema getSchema() {
return currentContainer.getSchema();
}
@Override
public int getRecordCount() {
if (sv2 != null) {
return sv2.getCount();
} else {
return currentContainer.getRecordCount();
}
}
@Override
public Iterator<VectorWrapper<?>> iterator() {
return currentContainer.iterator();
}
@Override
public SelectionVector2 getSelectionVector2() {
throw new UnsupportedOperationException();
}
@Override
public SelectionVector4 getSelectionVector4() {
throw new UnsupportedOperationException();
}
}
Source : HdfsUpload.java
with Apache License 2.0
from whirlys
/**
* Append a file: append the contents of a local file to a file on HDFS.
*/
public static void appendToFile(String localPathStr, String hdfsPathStr) {
Path hdfsPath = new Path(hdfsPathStr);
try (FileInputStream inputStream = new FileInputStream(localPathStr)) {
FSDataOutputStream outputStream = fileSystem.append(hdfsPath);
byte[] data = new byte[1024];
int read = -1;
while ((read = inputStream.read(data)) > 0) {
outputStream.write(data, 0, read);
}
outputStream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
Source : FileStreamToken.java
with Apache License 2.0
from ucarGroup
/**
* HDFS file stream token class.
*
* @author lubiao
*/
public class FileStreamToken {
private volatile String pathString;
private volatile Path path;
private volatile DistributedFileSystem fileSystem;
private volatile FSDataOutputStream fileStream;
private volatile long lastUpdateTime;
private volatile long lastHSyncTime;
public FileStreamToken(String pathString, Path path, DistributedFileSystem fileSystem, FSDataOutputStream fileStream) {
this.pathString = pathString;
this.path = path;
this.fileSystem = fileSystem;
this.fileStream = fileStream;
this.lastUpdateTime = System.currentTimeMillis();
this.lastHSyncTime = 0;
}
public String getPathString() {
return pathString;
}
public void setPathString(String pathString) {
this.pathString = pathString;
}
public Path getPath() {
return path;
}
public void setPath(Path path) {
this.path = path;
}
public DistributedFileSystem getFileSystem() {
return fileSystem;
}
public void setFileSystem(DistributedFileSystem fileSystem) {
this.fileSystem = fileSystem;
}
public FSDataOutputStream getFileStream() {
return fileStream;
}
public void setFileStream(FSDataOutputStream fileStream) {
this.fileStream = fileStream;
}
public long getLastUpdateTime() {
return lastUpdateTime;
}
public void setLastUpdateTime(long lastUpdateTime) {
this.lastUpdateTime = lastUpdateTime;
}
public long getLastHSyncTime() {
return lastHSyncTime;
}
public void setLastHSyncTime(long lastHSyncTime) {
this.lastHSyncTime = lastHSyncTime;
}
}
Source : FileStreamToken.java
with Apache License 2.0
from ucarGroup
public void setFileStream(FSDataOutputStream fileStream) {
this.fileStream = fileStream;
}
Source : BaseRecordHandler.java
with Apache License 2.0
from ucarGroup
private void hflush(FSDataOutputStream fsOut) throws Exception {
fsOut.hflush();
}
Source : TestRubixCaching.java
with Apache License 2.0
from trinodb
private void writeFile(FSDataOutputStream outputStream, byte[] content) throws IOException {
try {
outputStream.write(content);
} finally {
outputStream.close();
}
}
Source : HDFSSequenceFile.java
with MIT License
from TranswarpCN
public class HDFSSequenceFile extends AbstractHDFSWriter {
private static final Logger logger = LoggerFactory.getLogger(HDFSSequenceFile.class);
private SequenceFile.Writer writer;
private String writeFormat;
private Context serializerContext;
private SequenceFileSerializer serializer;
private boolean useRawLocalFileSystem;
private FSDataOutputStream outStream = null;
public HDFSSequenceFile() {
writer = null;
}
@Override
public void configure(Context context) {
super.configure(context);
// use binary writable serialize by default
writeFormat = context.getString("hdfs.writeFormat", SequenceFileSerializerType.Writable.name());
useRawLocalFileSystem = context.getBoolean("hdfs.useRawLocalFileSystem", false);
serializerContext = new Context(context.getSubProperties(SequenceFileSerializerFactory.CTX_PREFIX));
serializer = SequenceFileSerializerFactory.getSerializer(writeFormat, serializerContext);
logger.info("writeFormat = " + writeFormat + ", UseRawLocalFileSystem = " + useRawLocalFileSystem);
}
@Override
public void open(String filePath) throws IOException {
open(filePath, null, CompressionType.NONE);
}
@Override
public void open(String filePath, CompressionCodec codeC, CompressionType compType) throws IOException {
Configuration conf = new Configuration();
Path dstPath = new Path(filePath);
FileSystem hdfs = dstPath.getFileSystem(conf);
open(dstPath, codeC, compType, conf, hdfs);
}
protected void open(Path dstPath, CompressionCodec codeC, CompressionType compType, Configuration conf, FileSystem hdfs) throws IOException {
if (useRawLocalFileSystem) {
if (hdfs instanceof LocalFileSystem) {
hdfs = ((LocalFileSystem) hdfs).getRaw();
} else {
logger.warn("useRawLocalFileSystem is set to true but file system " + "is not of type LocalFileSystem: " + hdfs.getClreplaced().getName());
}
}
if (conf.getBoolean("hdfs.append.support", false) == true && hdfs.isFile(dstPath)) {
outStream = hdfs.append(dstPath);
} else {
outStream = hdfs.create(dstPath);
}
writer = SequenceFile.createWriter(conf, outStream, serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);
registerCurrentStream(outStream, hdfs, dstPath);
}
@Override
public void append(Event e) throws IOException {
for (SequenceFileSerializer.Record record : serializer.serialize(e)) {
writer.append(record.getKey(), record.getValue());
}
}
@Override
public void sync() throws IOException {
hflushOrSync(outStream);
}
@Override
public void close() throws IOException {
writer.close();
outStream.close();
unregisterCurrentStream();
}
}
Source : HDFSDataStream.java
with MIT License
from TranswarpCN
public class HDFSDataStream extends AbstractHDFSWriter {
private static final Logger logger = LoggerFactory.getLogger(HDFSDataStream.class);
private FSDataOutputStream outStream;
private String serializerType;
private Context serializerContext;
private EventSerializer serializer;
private boolean useRawLocalFileSystem;
@Override
public void configure(Context context) {
super.configure(context);
serializerType = context.getString("serializer", "TEXT");
useRawLocalFileSystem = context.getBoolean("hdfs.useRawLocalFileSystem", false);
serializerContext = new Context(context.getSubProperties(EventSerializer.CTX_PREFIX));
logger.info("Serializer = " + serializerType + ", UseRawLocalFileSystem = " + useRawLocalFileSystem);
}
@VisibleForTesting
protected FileSystem getDfs(Configuration conf, Path dstPath) throws IOException {
return dstPath.getFileSystem(conf);
}
protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {
if (useRawLocalFileSystem) {
if (hdfs instanceof LocalFileSystem) {
hdfs = ((LocalFileSystem) hdfs).getRaw();
} else {
logger.warn("useRawLocalFileSystem is set to true but file system " + "is not of type LocalFileSystem: " + hdfs.getClreplaced().getName());
}
}
boolean appending = false;
if (conf.getBoolean("hdfs.append.support", false) == true && hdfs.isFile(dstPath)) {
outStream = hdfs.append(dstPath);
appending = true;
} else {
outStream = hdfs.create(dstPath);
}
serializer = EventSerializerFactory.getInstance(serializerType, serializerContext, outStream);
if (appending && !serializer.supportsReopen()) {
outStream.close();
serializer = null;
throw new IOException("serializer (" + serializerType + ") does not support append");
}
// must call superclass to check for replication issues
registerCurrentStream(outStream, hdfs, dstPath);
if (appending) {
serializer.afterReopen();
} else {
serializer.afterCreate();
}
}
@Override
public void open(String filePath) throws IOException {
Configuration conf = new Configuration();
Path dstPath = new Path(filePath);
FileSystem hdfs = getDfs(conf, dstPath);
doOpen(conf, dstPath, hdfs);
}
@Override
public void open(String filePath, CompressionCodec codec, CompressionType cType) throws IOException {
open(filePath);
}
@Override
public void append(Event e) throws IOException {
serializer.write(e);
}
@Override
public void sync() throws IOException {
serializer.flush();
outStream.flush();
hflushOrSync(outStream);
}
@Override
public void close() throws IOException {
serializer.flush();
serializer.beforeClose();
outStream.flush();
hflushOrSync(outStream);
outStream.close();
unregisterCurrentStream();
}
}
Source : HDFSCompressedDataStream.java
with MIT License
from TranswarpCN
public class HDFSCompressedDataStream extends AbstractHDFSWriter {
private static final Logger logger = LoggerFactory.getLogger(HDFSCompressedDataStream.class);
private FSDataOutputStream fsOut;
private CompressionOutputStream cmpOut;
private boolean isFinished = false;
private String serializerType;
private Context serializerContext;
private EventSerializer serializer;
private boolean useRawLocalFileSystem;
private Compressor compressor;
@Override
public void configure(Context context) {
super.configure(context);
serializerType = context.getString("serializer", "TEXT");
useRawLocalFileSystem = context.getBoolean("hdfs.useRawLocalFileSystem", false);
serializerContext = new Context(context.getSubProperties(EventSerializer.CTX_PREFIX));
logger.info("Serializer = " + serializerType + ", UseRawLocalFileSystem = " + useRawLocalFileSystem);
}
@Override
public void open(String filePath) throws IOException {
DefaultCodec defCodec = new DefaultCodec();
CompressionType cType = CompressionType.BLOCK;
open(filePath, defCodec, cType);
}
@Override
public void open(String filePath, CompressionCodec codec, CompressionType cType) throws IOException {
Configuration conf = new Configuration();
Path dstPath = new Path(filePath);
FileSystem hdfs = dstPath.getFileSystem(conf);
if (useRawLocalFileSystem) {
if (hdfs instanceof LocalFileSystem) {
hdfs = ((LocalFileSystem) hdfs).getRaw();
} else {
logger.warn("useRawLocalFileSystem is set to true but file system " + "is not of type LocalFileSystem: " + hdfs.getClreplaced().getName());
}
}
boolean appending = false;
if (conf.getBoolean("hdfs.append.support", false) == true && hdfs.isFile(dstPath)) {
fsOut = hdfs.append(dstPath);
appending = true;
} else {
fsOut = hdfs.create(dstPath);
}
if (compressor == null) {
compressor = CodecPool.getCompressor(codec, conf);
}
cmpOut = codec.createOutputStream(fsOut, compressor);
serializer = EventSerializerFactory.getInstance(serializerType, serializerContext, cmpOut);
if (appending && !serializer.supportsReopen()) {
cmpOut.close();
serializer = null;
throw new IOException("serializer (" + serializerType + ") does not support append");
}
registerCurrentStream(fsOut, hdfs, dstPath);
if (appending) {
serializer.afterReopen();
} else {
serializer.afterCreate();
}
isFinished = false;
}
@Override
public void append(Event e) throws IOException {
if (isFinished) {
cmpOut.resetState();
isFinished = false;
}
serializer.write(e);
}
@Override
public void sync() throws IOException {
// We must use finish() and resetState() here -- flush() is apparently not
// supported by the compressed output streams (it's a no-op).
// Also, since resetState() writes headers, avoid calling it without an
// additional write/append operation.
// Note: There are bugs in Hadoop & JDK w/ pure-java gzip; see HADOOP-8522.
serializer.flush();
if (!isFinished) {
cmpOut.finish();
isFinished = true;
}
fsOut.flush();
hflushOrSync(this.fsOut);
}
@Override
public void close() throws IOException {
serializer.flush();
serializer.beforeClose();
if (!isFinished) {
cmpOut.finish();
isFinished = true;
}
fsOut.flush();
hflushOrSync(fsOut);
cmpOut.close();
if (compressor != null) {
CodecPool.returnCompressor(compressor);
compressor = null;
}
unregisterCurrentStream();
}
}
Source : AbstractHDFSWriter.java
with MIT License
from TranswarpCN
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class AbstractHDFSWriter implements HDFSWriter {
private static final Logger logger = LoggerFactory.getLogger(AbstractHDFSWriter.class);
private FSDataOutputStream outputStream;
private FileSystem fs;
private Path destPath;
private Method refGetNumCurrentReplicas = null;
private Method refGetDefaultReplication = null;
private Method refHflushOrSync = null;
private Integer configuredMinReplicas = null;
private Integer numberOfCloseRetries = null;
private long timeBetweenCloseRetries = Long.MAX_VALUE;
final static Object[] NO_ARGS = new Object[] {};
@Override
public void configure(Context context) {
configuredMinReplicas = context.getInteger("hdfs.minBlockReplicas");
if (configuredMinReplicas != null) {
Preconditions.checkArgument(configuredMinReplicas >= 0, "hdfs.minBlockReplicas must be greater than or equal to 0");
}
numberOfCloseRetries = context.getInteger("hdfs.closeTries", 1) - 1;
if (numberOfCloseRetries > 1) {
try {
timeBetweenCloseRetries = context.getLong("hdfs.callTimeout", 10000l);
} catch (NumberFormatException e) {
logger.warn("hdfs.callTimeout can not be parsed to a long: " + context.getLong("hdfs.callTimeout"));
}
timeBetweenCloseRetries = Math.max(timeBetweenCloseRetries / numberOfCloseRetries, 1000);
}
}
/**
* Contract for subclasses: Call registerCurrentStream() on open,
* unregisterCurrentStream() on close, and the base clreplaced takes care of the
* rest.
* @return
*/
@Override
public boolean isUnderReplicated() {
try {
int numBlocks = getNumCurrentReplicas();
if (numBlocks == -1) {
return false;
}
int desiredBlocks;
if (configuredMinReplicas != null) {
desiredBlocks = configuredMinReplicas;
} else {
desiredBlocks = getFsDesiredReplication();
}
return numBlocks < desiredBlocks;
} catch (IllegalAccessException e) {
logger.error("Unexpected error while checking replication factor", e);
} catch (InvocationTargetException e) {
logger.error("Unexpected error while checking replication factor", e);
} catch (IllegalArgumentException e) {
logger.error("Unexpected error while checking replication factor", e);
}
return false;
}
protected void registerCurrentStream(FSDataOutputStream outputStream, FileSystem fs, Path destPath) {
Preconditions.checkNotNull(outputStream, "outputStream must not be null");
Preconditions.checkNotNull(fs, "fs must not be null");
Preconditions.checkNotNull(destPath, "destPath must not be null");
this.outputStream = outputStream;
this.fs = fs;
this.destPath = destPath;
this.refGetNumCurrentReplicas = reflectGetNumCurrentReplicas(outputStream);
this.refGetDefaultReplication = reflectGetDefaultReplication(fs);
this.refHflushOrSync = reflectHflushOrSync(outputStream);
}
protected void unregisterCurrentStream() {
this.outputStream = null;
this.fs = null;
this.destPath = null;
this.refGetNumCurrentReplicas = null;
this.refGetDefaultReplication = null;
}
public int getFsDesiredReplication() {
short replication = 0;
if (fs != null && destPath != null) {
if (refGetDefaultReplication != null) {
try {
replication = (Short) refGetDefaultReplication.invoke(fs, destPath);
} catch (IllegalAccessException e) {
logger.warn("Unexpected error calling getDefaultReplication(Path)", e);
} catch (InvocationTargetException e) {
logger.warn("Unexpected error calling getDefaultReplication(Path)", e);
}
} else {
// will not work on Federated HDFS (see HADOOP-8014)
replication = fs.getDefaultReplication();
}
}
return replication;
}
/**
* This method gets the datanode replication count for the current open file.
*
* If the pipeline isn't started yet or is empty, you will get the default
* replication factor.
*
* <p/>If this function returns -1, it means you
* are not properly running with the HDFS-826 patch.
* @throws InvocationTargetException
* @throws IllegalAccessException
* @throws IllegalArgumentException
*/
public int getNumCurrentReplicas() throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
if (refGetNumCurrentReplicas != null && outputStream != null) {
OutputStream dfsOutputStream = outputStream.getWrappedStream();
if (dfsOutputStream != null) {
Object repl = refGetNumCurrentReplicas.invoke(dfsOutputStream, NO_ARGS);
if (repl instanceof Integer) {
return ((Integer) repl).intValue();
}
}
}
return -1;
}
/**
* Find the 'getNumCurrentReplicas' method on the passed <code>os</code> stream.
* @return Method or null.
*/
private Method reflectGetNumCurrentReplicas(FSDataOutputStream os) {
Method m = null;
if (os != null) {
Class<? extends OutputStream> wrappedStreamClass = os.getWrappedStream().getClass();
try {
m = wrappedStreamClass.getDeclaredMethod("getNumCurrentReplicas", new Class<?>[] {});
m.setAccessible(true);
} catch (NoSuchMethodException e) {
logger.info("FileSystem's output stream doesn't support" + " getNumCurrentReplicas; --HDFS-826 not available; fsOut=" + wrappedStreamClreplaced.getName() + "; err=" + e);
} catch (SecurityException e) {
logger.info("Doesn't have access to getNumCurrentReplicas on " + "FileSystems's output stream --HDFS-826 not available; fsOut=" + wrappedStreamClreplaced.getName(), e);
// could happen on setAccessible()
m = null;
}
}
if (m != null) {
logger.debug("Using getNumCurrentReplicas--HDFS-826");
}
return m;
}
/**
* Find the 'getDefaultReplication' method on the passed <code>fs</code>
* FileSystem that takes a Path argument.
* @return Method or null.
*/
private Method reflectGetDefaultReplication(FileSystem fileSystem) {
Method m = null;
if (fileSystem != null) {
Class<?> fsClass = fileSystem.getClass();
try {
m = fsClass.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
} catch (NoSuchMethodException e) {
logger.debug("FileSystem implementation doesn't support" + " getDefaultReplication(Path); -- HADOOP-8014 not available; " + "clreplacedName = " + fsClreplaced.getName() + "; err = " + e);
} catch (SecurityException e) {
logger.debug("No access to getDefaultReplication(Path) on " + "FileSystem implementation -- HADOOP-8014 not available; " + "clreplacedName = " + fsClreplaced.getName() + "; err = " + e);
}
}
if (m != null) {
logger.debug("Using FileSystem.getDefaultReplication(Path) from " + "HADOOP-8014");
}
return m;
}
private Method reflectHflushOrSync(FSDataOutputStream os) {
Method m = null;
if (os != null) {
Class<?> fsDataOutputStreamClass = os.getClass();
try {
m = fsDataOutputStreamClreplaced.getMethod("hflush");
} catch (NoSuchMethodException ex) {
logger.debug("HFlush not found. Will use sync() instead");
try {
m = fsDataOutputStreamClreplaced.getMethod("sync");
} catch (Exception ex1) {
String msg = "Neither hflush not sync were found. That seems to be " + "a problem!";
logger.error(msg);
throw new FlumeException(msg, ex1);
}
}
}
return m;
}
/**
* If hflush is available in this version of HDFS, then this method calls
* hflush, else it calls sync.
* @param os - The stream to flush/sync
* @throws IOException
*/
protected void hflushOrSync(FSDataOutputStream os) throws IOException {
try {
// At this point the refHflushOrSync cannot be null,
// since register method would have thrown if it was.
this.refHflushOrSync.invoke(os);
} catch (InvocationTargetException e) {
String msg = "Error while trying to hflushOrSync!";
logger.error(msg);
Throwable cause = e.getCause();
if (cause != null && cause instanceof IOException) {
throw (IOException) cause;
}
throw new FlumeException(msg, e);
} catch (Exception e) {
String msg = "Error while trying to hflushOrSync!";
logger.error(msg);
throw new FlumeException(msg, e);
}
}
}
Source : AbstractHDFSWriter.java
with MIT License
from TranswarpCN
/**
* If hflush is available in this version of HDFS, then this method calls
* hflush, else it calls sync.
* @param os - The stream to flush/sync
* @throws IOException
*/
protected void hflushOrSync(FSDataOutputStream os) throws IOException {
try {
// At this point the refHflushOrSync cannot be null,
// since register method would have thrown if it was.
this.refHflushOrSync.invoke(os);
} catch (InvocationTargetException e) {
String msg = "Error while trying to hflushOrSync!";
logger.error(msg);
Throwable cause = e.getCause();
if (cause != null && cause instanceof IOException) {
throw (IOException) cause;
}
throw new FlumeException(msg, e);
} catch (Exception e) {
String msg = "Error while trying to hflushOrSync!";
logger.error(msg);
throw new FlumeException(msg, e);
}
}
Source : GenericSourceSink.java
with Apache License 2.0
from snuspl
/**
* Write output to HDFS according to the parallelism.
*/
final class HDFSWrite extends DoFn<String, Void> {
private static final Logger LOG = LoggerFactory.getLogger(HDFSWrite.class.getName());
private final String path;
private Path fileName;
private FileSystem fileSystem;
private FSDataOutputStream outputStream;
/**
* Constructor.
*
* @param path HDFS path
*/
HDFSWrite(final String path) {
this.path = path;
}
/**
* Start bundle.
* The number of output files are determined according to the parallelism.
* i.e. if parallelism is 2, then there are total 2 output files.
* Each output file is written as a bundle.
* @param c bundle context {@link StartBundleContext}
*/
@StartBundle
public void startBundle(final StartBundleContext c) {
fileName = new Path(path + UUID.randomUUID().toString());
try {
fileSystem = fileName.getFileSystem(new JobConf());
outputStream = fileSystem.create(fileName, false);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* process element.
* @param c context {@link ProcessContext}
* @throws Exception exception.
*/
@ProcessElement
public void processElement(final ProcessContext c) throws Exception {
try {
outputStream.writeBytes(c.element() + "\n");
} catch (Exception e) {
outputStream.close();
fileSystem.delete(fileName, true);
throw new RuntimeException(e);
}
}
/**
* finish bundle.
* @param c context
* @throws IOException output stream exception
*/
@FinishBundle
public void finishBundle(final FinishBundleContext c) throws IOException {
outputStream.close();
}
}
Source : WARCFileWriter.java
with Apache License 2.0
from ScaleUnlimited
/**
* Creates an output segment file and sets up the output streams to point at it. If the file
* already exists, retries with a different filename. This is a bit nasty -- after all,
* {@link FileOutputFormat}'s work directory concept is supposed to prevent filename clashes --
* but it looks like Amazon Elastic MapReduce prevents use of per-task work directories if the
* output of a job is on S3.
*
* TODO: Investigate this and find a better solution.
*/
private void createSegment() throws IOException {
_segmentsAttempted = 0;
_bytesWritten = 0;
boolean success = false;
while (!success) {
Path path = _workOutputPath.suffix(String.format(_extensionFormat, _segmentsCreated, _segmentsAttempted));
FileSystem fs = path.getFileSystem(_conf);
try {
// The o.a.h.mapred OutputFormats overwrite existing files, whereas
// the o.a.h.mapreduce OutputFormats don't overwrite. Bizarre...
// Here, overwrite if progress != null, i.e. if using mapred API.
FSDataOutputStream fsStream = (_progress == null) ? fs.create(path, false) : fs.create(path, _progress);
_byteStream = new CountingOutputStream(new BufferedOutputStream(fsStream));
_dataStream = new DataOutputStream(_codec == null ? _byteStream : _codec.createOutputStream(_byteStream));
_segmentsCreated++;
logger.info("Writing to output file: {}", path);
success = true;
} catch (IOException e) {
if (e.getMessage().startsWith("File already exists")) {
logger.warn("Tried to create file {} but it already exists; retrying.", path);
// retry
_segmentsAttempted++;
} else {
throw e;
}
}
}
}
Source : TextMultiOutputFormat.java
with Apache License 2.0
from Qihoo360
public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException {
boolean ignoreSeparatorOnNull = job.getBoolean("mapred.textoutputformat.ignore.separator", false);
String keyValueSeparator = job.get("mapred.textoutputformat.separator", "\t");
splitSize = job.getLong(MR_REDUCE_MAX_FILE_PER_FILE, SPLIT_SIZE);
jobConf = job;
fileName = name;
jobProgress = progress;
Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, GzipCodec.class);
// create the named codec
codec = ReflectionUtils.newInstance(codecClass, job);
FSDataOutputStream fileOut = createFile();
return new MultiSplitRecordWriter<K, V>(new NewDataOutputStream(codec.createOutputStream(fileOut)), keyValueSeparator, ignoreSeparatorOnNull);
}
Source : FileSystemNodeLabelsStore.java
with Apache License 2.0
from NJUJYB
public class FileSystemNodeLabelsStore extends NodeLabelsStore {
public FileSystemNodeLabelsStore(CommonNodeLabelsManager mgr) {
super(mgr);
}
protected static final Log LOG = LogFactory.getLog(FileSystemNodeLabelsStore.class);
protected static final String DEFAULT_DIR_NAME = "node-labels";
protected static final String MIRROR_FILENAME = "nodelabel.mirror";
protected static final String EDITLOG_FILENAME = "nodelabel.editlog";
protected enum SerializedLogType {
ADD_LABELS, NODE_TO_LABELS, REMOVE_LABELS
}
Path fsWorkingPath;
FileSystem fs;
FSDataOutputStream editlogOs;
Path editLogPath;
private String getDefaultFSNodeLabelsRootDir() throws IOException {
// default is in local: /tmp/hadoop-yarn-${user}/node-labels/
return "file:///tmp/hadoop-yarn-" + UserGroupInformation.getCurrentUser().getShortUserName() + "/" + DEFAULT_DIR_NAME;
}
@Override
public void init(Configuration conf) throws Exception {
fsWorkingPath = new Path(conf.get(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR, getDefaultFSNodeLabelsRootDir()));
setFileSystem(conf);
// mkdir of root dir path
fs.mkdirs(fsWorkingPath);
}
@Override
public void close() throws IOException {
try {
fs.close();
editlogOs.close();
} catch (IOException e) {
LOG.warn("Exception happened whiling shutting down,", e);
}
}
private void setFileSystem(Configuration conf) throws IOException {
Configuration confCopy = new Configuration(conf);
confCopy.setBoolean("dfs.client.retry.policy.enabled", true);
String retryPolicy = confCopy.get(YarnConfiguration.FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC, YarnConfiguration.DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC);
confCopy.set("dfs.client.retry.policy.spec", retryPolicy);
fs = fsWorkingPath.getFileSystem(confCopy);
// if it's local file system, use RawLocalFileSystem instead of
// LocalFileSystem, the latter one doesn't support append.
if (fs.getScheme().equals("file")) {
fs = ((LocalFileSystem) fs).getRaw();
}
}
private void ensureAppendEditlogFile() throws IOException {
editlogOs = fs.append(editLogPath);
}
private void ensureCloseEditlogFile() throws IOException {
editlogOs.close();
}
@Override
public void updateNodeToLabelsMappings(Map<NodeId, Set<String>> nodeToLabels) throws IOException {
ensureAppendEditlogFile();
editlogOs.writeInt(SerializedLogType.NODE_TO_LABELS.ordinal());
((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest.newInstance(nodeToLabels)).getProto().writeDelimitedTo(editlogOs);
ensureCloseEditlogFile();
}
@Override
public void storeNewClusterNodeLabels(Set<String> labels) throws IOException {
ensureAppendEditlogFile();
editlogOs.writeInt(SerializedLogType.ADD_LABELS.ordinal());
((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest.newInstance(labels)).getProto().writeDelimitedTo(editlogOs);
ensureCloseEditlogFile();
}
@Override
public void removeClusterNodeLabels(Collection<String> labels) throws IOException {
ensureAppendEditlogFile();
editlogOs.writeInt(SerializedLogType.REMOVE_LABELS.ordinal());
((RemoveFromClusterNodeLabelsRequestPBImpl) RemoveFromClusterNodeLabelsRequest.newInstance(Sets.newHashSet(labels.iterator()))).getProto().writeDelimitedTo(editlogOs);
ensureCloseEditlogFile();
}
@Override
public void recover() throws IOException {
/*
* Steps of recover
* 1) Read from last mirror (from mirror or mirror.old)
* 2) Read from last edit log, and apply such edit log
* 3) Write new mirror to mirror.writing
* 4) Rename mirror to mirror.old
* 5) Move mirror.writing to mirror
* 6) Remove mirror.old
* 7) Remove edit log and create a new empty edit log
*/
// Open mirror from serialized file
Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
Path oldMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".old");
FSDataInputStream is = null;
if (fs.exists(mirrorPath)) {
is = fs.open(mirrorPath);
} else if (fs.exists(oldMirrorPath)) {
is = fs.open(oldMirrorPath);
}
if (null != is) {
Set<String> labels = new AddToClusterNodeLabelsRequestPBImpl(AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is)).getNodeLabels();
Map<NodeId, Set<String>> nodeToLabels = new ReplaceLabelsOnNodeRequestPBImpl(ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is)).getNodeToLabels();
mgr.addToCluserNodeLabels(labels);
mgr.replaceLabelsOnNode(nodeToLabels);
is.close();
}
// Open and process editlog
editLogPath = new Path(fsWorkingPath, EDITLOG_FILENAME);
if (fs.exists(editLogPath)) {
is = fs.open(editLogPath);
while (true) {
try {
// read edit log one by one
SerializedLogType type = SerializedLogType.values()[is.readInt()];
switch(type) {
case ADD_LABELS:
{
Collection<String> labels = AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is).getNodeLabelsList();
mgr.addToCluserNodeLabels(Sets.newHashSet(labels.iterator()));
break;
}
case REMOVE_LABELS:
{
Collection<String> labels = RemoveFromClusterNodeLabelsRequestProto.parseDelimitedFrom(is).getNodeLabelsList();
mgr.removeFromClusterNodeLabels(labels);
break;
}
case NODE_TO_LABELS:
{
Map<NodeId, Set<String>> map = new ReplaceLabelsOnNodeRequestPBImpl(ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is)).getNodeToLabels();
mgr.replaceLabelsOnNode(map);
break;
}
}
} catch (EOFException e) {
// EOF hit, break
break;
}
}
}
// Serialize current mirror to mirror.writing
Path writingMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".writing");
FSDataOutputStream os = fs.create(writingMirrorPath, true);
((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequestPBImpl.newInstance(mgr.getClusterNodeLabels())).getProto().writeDelimitedTo(os);
((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest.newInstance(mgr.getNodeLabels())).getProto().writeDelimitedTo(os);
os.close();
// Move mirror to mirror.old
if (fs.exists(mirrorPath)) {
fs.delete(oldMirrorPath, false);
fs.rename(mirrorPath, oldMirrorPath);
}
// move mirror.writing to mirror
fs.rename(writingMirrorPath, mirrorPath);
fs.delete(writingMirrorPath, false);
// remove mirror.old
fs.delete(oldMirrorPath, false);
// create a new editlog file
editlogOs = fs.create(editLogPath, true);
editlogOs.close();
LOG.info("Finished write mirror at:" + mirrorPath.toString());
LOG.info("Finished create editlog file at:" + editLogPath.toString());
}
}
Source : TestSwiftFileSystemRename.java
with Apache License 2.0
from NJUJYB
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameFile() throws Exception {
assumeRenameSupported();
final Path old = new Path("/test/alice/file");
final Path newPath = new Path("/test/bob/file");
fs.mkdirs(newPath.getParent());
final FSDataOutputStream fsDataOutputStream = fs.create(old);
final byte[] message = "Some data".getBytes();
fsDataOutputStream.write(message);
fsDataOutputStream.close();
assertTrue(fs.exists(old));
rename(old, newPath, true, false, true);
final FSDataInputStream bobStream = fs.open(newPath);
final byte[] bytes = new byte[512];
final int read = bobStream.read(bytes);
bobStream.close();
final byte[] buffer = new byte[read];
System.arraycopy(bytes, 0, buffer, 0, read);
assertEquals(new String(message), new String(buffer));
}
Source : TestSwiftFileSystemExtendedContract.java
with Apache License 2.0
from NJUJYB
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testCreateFile() throws Exception {
final Path f = new Path("/test/testCreateFile");
final FSDataOutputStream fsDataOutputStream = fs.create(f);
fsDataOutputStream.close();
replacedertExists("created file", f);
}
Source : SwiftFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB
/**
* Create and then close a file
* @param path path to create
* @throws IOException on a failure
*/
protected void createEmptyFile(Path path) throws IOException {
FSDataOutputStream out = fs.create(path);
out.close();
}
Source : SwiftFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB
/**
* Get the number of partitions written from the Swift Native FS APIs
* @param out output stream
* @return the number of partitioned files written by the stream
*/
protected int getPartitionsWritten(FSDataOutputStream out) {
return SwiftNativeFileSystem.getPartitionsWritten(out);
}
Source : SwiftFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB
/**
* Create a file with the given data.
*
* @param path path to write
* @param sourceData source dataset
* @throws IOException on any problem
*/
protected void createFile(Path path, byte[] sourceData) throws IOException {
FSDataOutputStream out = fs.create(path);
out.write(sourceData, 0, sourceData.length);
out.close();
}
Source : TestUserResolve.java
with Apache License 2.0
from NJUJYB
/**
* Creates users file with the content as the String usersFileContent.
* @param usersFilePath the path to the file that is to be created
* @param usersFileContent Content of users file
* @throws IOException
*/
private static void writeUserList(Path usersFilePath, String usersFileContent) throws IOException {
FSDataOutputStream out = null;
try {
out = fs.create(usersFilePath, true);
out.writeBytes(usersFileContent);
} finally {
if (out != null) {
out.close();
}
}
}
Source : TestS3NInMemoryFileSystem.java
with Apache License 2.0
from NJUJYB
public void testBasicReadWriteIO() throws IOException {
FSDataOutputStream writeData = fs.create(new Path(TEST_PATH));
writeData.write(TEST_DATA.getBytes());
writeData.flush();
writeData.close();
FSDataInputStream readData = fs.open(new Path(TEST_PATH));
BufferedReader br = new BufferedReader(new InputStreamReader(readData));
String line = "";
StringBuffer stringBuffer = new StringBuffer();
while ((line = br.readLine()) != null) {
stringBuffer.append(line);
}
br.close();
assert (TEST_DATA.equals(stringBuffer.toString()));
}
19
Source : TestS3InMemoryFileSystem.java
with Apache License 2.0
from NJUJYB
public void testBasicReadWriteIO() throws IOException {
FSDataOutputStream writeStream = fs.create(new Path(TEST_PATH));
writeStream.write(TEST_DATA.getBytes());
writeStream.flush();
writeStream.close();
FSDataInputStream readStream = fs.open(new Path(TEST_PATH));
BufferedReader br = new BufferedReader(new InputStreamReader(readStream));
String line = "";
StringBuffer stringBuffer = new StringBuffer();
while ((line = br.readLine()) != null) {
stringBuffer.append(line);
}
br.close();
assert (TEST_DATA.equals(stringBuffer.toString()));
}
19
Source : CreateOp.java
with Apache License 2.0
from NJUJYB
// Operation
@Override
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = super.run(fs);
FSDataOutputStream os = null;
try {
Path fn = getCreateFile();
Range<Long> writeSizeRange = getConfig().getWriteSize();
long writeSize = 0;
long blockSize = determineBlockSize();
short replicationAmount = determineReplication();
if (getConfig().shouldWriteUseBlockSize()) {
writeSizeRange = getConfig().getBlockSize();
}
writeSize = Range.betweenPositive(getRandom(), writeSizeRange);
long bytesWritten = 0;
long timeTaken = 0;
int bufSize = getBufferSize();
boolean overWrite = false;
DataWriter writer = new DataWriter(getRandom());
LOG.info("Attempting to create file at " + fn + " of size " + Helper.toByteInfo(writeSize) + " using blocksize " + Helper.toByteInfo(blockSize) + " and replication amount " + replicationAmount);
{
// open & create
long startTime = Timer.now();
os = fs.create(fn, overWrite, bufSize, replicationAmount, blockSize);
timeTaken += Timer.elapsed(startTime);
// write the given length
GenerateOutput stats = writer.writeSegment(writeSize, os);
bytesWritten += stats.getBytesWritten();
timeTaken += stats.getTimeTaken();
// capture close time
startTime = Timer.now();
os.close();
os = null;
timeTaken += Timer.elapsed(startTime);
}
LOG.info("Created file at " + fn + " of size " + Helper.toByteInfo(bytesWritten) + " bytes using blocksize " + Helper.toByteInfo(blockSize) + " and replication amount " + replicationAmount + " in " + timeTaken + " milliseconds");
// collect all the stats
out.add(new OperationOutput(OutputType.LONG, getType(), ReportWriter.OK_TIME_TAKEN, timeTaken));
out.add(new OperationOutput(OutputType.LONG, getType(), ReportWriter.BYTES_WRITTEN, bytesWritten));
out.add(new OperationOutput(OutputType.LONG, getType(), ReportWriter.SUCCESSES, 1L));
} catch (IOException e) {
out.add(new OperationOutput(OutputType.LONG, getType(), ReportWriter.FAILURES, 1L));
LOG.warn("Error with creating", e);
} finally {
if (os != null) {
try {
os.close();
} catch (IOException e) {
LOG.warn("Error closing create stream", e);
}
}
}
return out;
}
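As a hedged reference for the five-argument create() overload used above, the sketch below shows the same call in isolation; the path, buffer size, replication and block size are illustrative assumptions rather than values from CreateOp, and fs is assumed to be an already-configured FileSystem.
static void createWithOptions(FileSystem fs) throws IOException {
  Path fn = new Path("/tmp/create-op-example"); // hypothetical path
  // overwrite=false, 4 KB buffer, replication 3, 128 MB block size
  FSDataOutputStream os = fs.create(fn, false, 4096, (short) 3, 128L * 1024 * 1024);
  try {
    os.write(new byte[1024]); // one small segment instead of DataWriter's generated data
  } finally {
    os.close();
  }
}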
19
Source : EventWriter.java
with Apache License 2.0
from NJUJYB
/**
* Event Writer is a utility class used to write events to the underlying
* stream. Typically, one event writer (which translates to one stream)
* is created per job
*/
class EventWriter {
static final String VERSION = "Avro-Json";
private FSDataOutputStream out;
private DatumWriter<Event> writer = new SpecificDatumWriter<Event>(Event.class);
private Encoder encoder;
private static final Log LOG = LogFactory.getLog(EventWriter.class);
EventWriter(FSDataOutputStream out) throws IOException {
this.out = out;
out.writeBytes(VERSION);
out.writeBytes("\n");
out.writeBytes(Event.SCHEMA$.toString());
out.writeBytes("\n");
this.encoder = EncoderFactory.get().jsonEncoder(Event.SCHEMA$, out);
}
synchronized void write(HistoryEvent event) throws IOException {
Event wrapper = new Event();
wrapper.type = event.getEventType();
wrapper.event = event.getDatum();
writer.write(wrapper, encoder);
encoder.flush();
out.writeBytes("\n");
}
void flush() throws IOException {
encoder.flush();
out.flush();
out.hflush();
}
void close() throws IOException {
try {
encoder.flush();
out.close();
out = null;
} finally {
IOUtils.cleanup(LOG, out);
}
}
private static final Schema GROUPS = Schema.createArray(JhCounterGroup.SCHEMA$);
private static final Schema COUNTERS = Schema.createArray(JhCounter.SCHEMA$);
static JhCounters toAvro(Counters counters) {
return toAvro(counters, "COUNTERS");
}
static JhCounters toAvro(Counters counters, String name) {
JhCounters result = new JhCounters();
result.name = new Utf8(name);
result.groups = new ArrayList<JhCounterGroup>(0);
if (counters == null)
return result;
for (CounterGroup group : counters) {
JhCounterGroup g = new JhCounterGroup();
g.name = new Utf8(group.getName());
g.displayName = new Utf8(group.getDisplayName());
g.counts = new ArrayList<JhCounter>(group.size());
for (Counter counter : group) {
JhCounter c = new JhCounter();
c.name = new Utf8(counter.getName());
c.displayName = new Utf8(counter.getDisplayName());
c.value = counter.getValue();
g.counts.add(c);
}
result.groups.add(g);
}
return result;
}
}
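A hedged usage sketch, not taken from the Hadoop sources, showing how the class above is typically wired to an FSDataOutputStream; the path is hypothetical and someEvent stands for any concrete HistoryEvent implementation.
static void writeOneHistoryEvent(FileSystem fs, HistoryEvent someEvent) throws IOException {
  // the history file path here is an illustrative assumption
  FSDataOutputStream out = fs.create(new Path("/tmp/job_history.jhist"), true);
  EventWriter writer = new EventWriter(out); // constructor writes the Avro-Json header lines
  try {
    writer.write(someEvent); // wraps the event and appends one JSON record plus a newline
    writer.flush();          // flushes the encoder, then the stream, then hflush()
  } finally {
    writer.close();          // flushes the encoder and closes the underlying stream
  }
}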
19
Source : TestJobClientGetJob.java
with Apache License 2.0
from NJUJYB
private Path createTempFile(String filename, String contents) throws IOException {
Path path = new Path(TEST_ROOT_DIR, filename);
Configuration conf = new Configuration();
FSDataOutputStream os = FileSystem.getLocal(conf).create(path);
os.writeBytes(contents);
os.close();
return path;
}
19
Source : TestTracing.java
with Apache License 2.0
from NJUJYB
@Test
public void testWriteWithoutTraceHooks() throws Exception {
Path file = new Path("withoutTraceWriteTest.dat");
FSDataOutputStream stream = dfs.create(file);
for (int i = 0; i < 10; i++) {
byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
stream.write(data);
}
stream.hflush();
stream.close();
Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
}
19
Source : TestWebHdfsFileSystemContract.java
with Apache License 2.0
from NJUJYB
public void testSeek() throws IOException {
final Path dir = new Path("/test/testSeek");
assertTrue(fs.mkdirs(dir));
{
// test zero file size
final Path zero = new Path(dir, "zero");
fs.create(zero).close();
int count = 0;
final FSDataInputStream in = fs.open(zero);
for (; in.read() != -1; count++) ;
in.close();
assertEquals(0, count);
}
final byte[] mydata = new byte[1 << 20];
new Random().nextBytes(mydata);
final Path p = new Path(dir, "file");
FSDataOutputStream out = fs.create(p, false, 4096, (short) 3, 1L << 17);
out.write(mydata, 0, mydata.length);
out.close();
final int one_third = mydata.length / 3;
final int two_third = one_third * 2;
{
// test seek
final int offset = one_third;
final int len = mydata.length - offset;
final byte[] buf = new byte[len];
final FSDataInputStream in = fs.open(p);
in.seek(offset);
// read all remaining data
in.readFully(buf);
in.close();
for (int i = 0; i < buf.length; i++) {
replacedertEquals("Position " + i + ", offset=" + offset + ", length=" + len, mydata[i + offset], buf[i]);
}
}
{
// test position read (read the data after the two_third location)
final int offset = two_third;
final int len = mydata.length - offset;
final byte[] buf = new byte[len];
final FSDataInputStream in = fs.open(p);
in.readFully(offset, buf);
in.close();
for (int i = 0; i < buf.length; i++) {
replacedertEquals("Position " + i + ", offset=" + offset + ", length=" + len, mydata[i + offset], buf[i]);
}
}
}
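A minimal sketch of the two read styles the test above exercises, assuming an already-written file of at least 300 bytes at a hypothetical path: seek() followed by readFully(buf) is stateful, while readFully(position, buf) reads at an absolute offset and leaves the stream position untouched.
static void readBothWays(FileSystem fs) throws IOException {
  FSDataInputStream in = fs.open(new Path("/tmp/seek-example")); // hypothetical path
  byte[] buf = new byte[100];
  in.seek(50);
  in.readFully(buf);      // stateful read of bytes 50..149, advances the position
  in.readFully(200, buf); // positioned read of bytes 200..299, position unchanged
  in.close();
}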
19
Source : TestWriteRead.java
with Apache License 2.0
from NJUJYB
/**
* Common routine to do a positional read while the file is open for write.
* After each iteration of write, read the file from beginning to end.
* Returns 0 on success, otherwise the number of failures.
*/
private int testWriteAndRead(String fname, int loopN, int chunkSize, long readBeginPosition) throws IOException {
int countOfFailures = 0;
long byteVisibleToRead = 0;
FSDataOutputStream out = null;
byte[] outBuffer = new byte[BUFFER_SIZE];
byte[] inBuffer = new byte[BUFFER_SIZE];
for (int i = 0; i < BUFFER_SIZE; i++) {
outBuffer[i] = (byte) (i & 0x00ff);
}
try {
Path path = getFullyQualifiedPath(fname);
long fileLengthBeforeOpen = 0;
if (ifExists(path)) {
if (truncateOption) {
out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.OVERWRITE)) : mfs.create(path, truncateOption);
LOG.info("File already exists. File open with Truncate mode: " + path);
} else {
out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.APPEND)) : mfs.append(path);
fileLengthBeforeOpen = getFileLengthFromNN(path);
LOG.info("File already exists of size " + fileLengthBeforeOpen + " File open for Append mode: " + path);
}
} else {
out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.CREATE)) : mfs.create(path);
}
long totalByteWritten = fileLengthBeforeOpen;
long totalByteVisible = fileLengthBeforeOpen;
long totalByteWrittenButNotVisible = 0;
boolean toFlush;
for (int i = 0; i < loopN; i++) {
toFlush = (i % 2) == 0;
writeData(out, outBuffer, chunkSize);
totalByteWritten += chunkSize;
if (toFlush) {
out.hflush();
totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
totalByteWrittenButNotVisible = 0;
} else {
totalByteWrittenButNotVisible += chunkSize;
}
if (verboseOption) {
LOG.info("TestReadWrite - Written " + chunkSize + ". Total written = " + totalByteWritten + ". TotalByteVisible = " + totalByteVisible + " to file " + fname);
}
byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);
String readmsg = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;
if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
readmsg = "preplaced: reader sees expected number of visible byte. " + readmsg + " [preplaced]";
} else {
countOfFailures++;
readmsg = "fail: reader see different number of visible byte. " + readmsg + " [fail]";
if (abortTestOnFailure) {
throw new IOException(readmsg);
}
}
LOG.info(readmsg);
}
// test the automatic flush after close
writeData(out, outBuffer, chunkSize);
totalByteWritten += chunkSize;
totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
totalByteWrittenButNotVisible += 0;
out.close();
byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);
String readmsg2 = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;
String readmsg;
if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
readmsg = "preplaced: reader sees expected number of visible byte on close. " + readmsg2 + " [preplaced]";
} else {
countOfFailures++;
readmsg = "fail: reader sees different number of visible byte on close. " + readmsg2 + " [fail]";
LOG.info(readmsg);
if (abortTestOnFailure)
throw new IOException(readmsg);
}
// now check if NN got the same length
long lenFromFc = getFileLengthFromNN(path);
if (lenFromFc != byteVisibleToRead) {
readmsg = "fail: reader sees different number of visible byte from NN " + readmsg2 + " [fail]";
throw new IOException(readmsg);
}
} catch (IOException e) {
throw new IOException("##### Caught Exception in testAppendWriteAndRead. Close file. " + "Total Byte Read so far = " + byteVisibleToRead, e);
} finally {
if (out != null)
out.close();
}
return -countOfFailures;
}
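The loop above leans on the hflush() visibility contract; the sketch below isolates that contract, assuming an HDFS FileSystem fs and a hypothetical path: once hflush() returns, a newly opened reader can see the flushed bytes even though the writer has not closed the file.
static void hflushVisibility(FileSystem fs) throws IOException {
  Path p = new Path("/tmp/hflush-visibility"); // hypothetical path
  FSDataOutputStream out = fs.create(p);
  out.write(new byte[4096]);
  out.hflush();                                // flushed bytes become visible to new readers
  FSDataInputStream in = fs.open(p);
  byte[] buf = new byte[4096];
  in.readFully(buf);                           // succeeds before out.close()
  in.close();
  out.close();
}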
19
Source : TestLeaseRecovery2.java
with Apache License 2.0
from NJUJYB
/**
* Test the NameNode's revoke lease on current lease holder function.
* @throws Exception
*/
@Test
public void testImmediateRecoveryOfLease() throws Exception {
// create a file
// write bytes into the file.
byte[] actual = new byte[FILE_SIZE];
int size = AppendTestUtil.nextInt(FILE_SIZE);
Path filepath = createFile("/immediateRecoverLease-shortlease", size, true);
// set the soft limit to be 1 second so that the
// namenode triggers lease recovery on next attempt to write-for-open.
cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
recoverLeaseUsingCreate(filepath);
verifyFile(dfs, filepath, actual, size);
// test recoverLease
// set the soft limit to be 1 hour but recoverLease should
// close the file immediately
cluster.setLeasePeriod(LONG_LEASE_PERIOD, LONG_LEASE_PERIOD);
size = AppendTestUtil.nextInt(FILE_SIZE);
filepath = createFile("/immediateRecoverLease-longlease", size, false);
// test recoverLease from a different client
recoverLease(filepath, null);
verifyFile(dfs, filepath, actual, size);
// test recoverlease from the same client
size = AppendTestUtil.nextInt(FILE_SIZE);
filepath = createFile("/immediateRecoverLease-sameclient", size, false);
// create another file using the same client
Path filepath1 = new Path(filepath.toString() + AppendTestUtil.nextInt());
FSDataOutputStream stm = dfs.create(filepath1, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
// recover the first file
recoverLease(filepath, dfs);
verifyFile(dfs, filepath, actual, size);
// continue to write to the second file
stm.write(buffer, 0, size);
stm.close();
verifyFile(dfs, filepath1, actual, size);
}
19
Source : TestLease.java
with Apache License 2.0
from NJUJYB
@Test
public void testLeaseAbort() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
cluster.waitActive();
NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
NamenodeProtocols spyNN = spy(preSpyNN);
DFSClient dfs = new DFSClient(null, spyNN, conf, null);
byte[] buf = new byte[1024];
FSDataOutputStream c_out = createFsOut(dfs, dirString + "c");
c_out.write(buf, 0, 1024);
c_out.close();
DFSInputStream c_in = dfs.open(dirString + "c");
FSDataOutputStream d_out = createFsOut(dfs, dirString + "d");
// stub the renew method.
doThrow(new RemoteException(InvalidToken.class.getName(), "Your token is worthless")).when(spyNN).renewLease(anyString());
// We don't need to wait the lease renewer thread to act.
// call renewLease() manually.
// make it look like the soft limit has been exceeded.
LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
dfs.lastLeaseRenewal = Time.now() - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
try {
dfs.renewLease();
} catch (IOException e) {
}
// Things should continue to work until it passes the hard limit without
// renewing.
try {
d_out.write(buf, 0, 1024);
LOG.info("Write worked beyond the soft limit as expected.");
} catch (IOException e) {
replacedert.fail("Write failed.");
}
// make it look like the hard limit has been exceeded.
dfs.lastLeaseRenewal = Time.now() - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
dfs.renewLease();
// this should not work.
try {
d_out.write(buf, 0, 1024);
d_out.close();
replacedert.fail("Write did not fail even after the fatal lease renewal failure");
} catch (IOException e) {
LOG.info("Write failed as expected. ", e);
}
// If aborted, the renewer should be empty. (no reference to clients)
Thread.sleep(1000);
Assert.assertTrue(originalRenewer.isEmpty());
// unstub
doNothing().when(spyNN).renewLease(anyString());
// existing input streams should work
try {
int num = c_in.read(buf, 0, 1);
if (num != 1) {
replacedert.fail("Failed to read 1 byte");
}
c_in.close();
} catch (IOException e) {
LOG.error("Read failed with ", e);
replacedert.fail("Read after lease renewal failure failed");
}
// new file writes should work.
try {
c_out = createFsOut(dfs, dirString + "c");
c_out.write(buf, 0, 1024);
c_out.close();
} catch (IOException e) {
LOG.error("Write failed with ", e);
replacedert.fail("Write failed");
}
} finally {
cluster.shutdown();
}
}
19
Source : TestFileCreation.java
with Apache License 2.0
from NJUJYB
//
// writes specified bytes to file.
//
public static void writeFile(FSDataOutputStream stm, int size) throws IOException {
byte[] buffer = AppendTestUtil.randomBytes(seed, size);
stm.write(buffer, 0, size);
}
19
Source : TestFileCreation.java
with Apache License 2.0
from NJUJYB
//
// writes to file but does not close it
//
static void writeFile(FSDataOutputStream stm) throws IOException {
writeFile(stm, fileSize);
}
19
Source : TestFileConcurrentReader.java
with Apache License 2.0
from NJUJYB
/**
* Test that writes to an incomplete block are available to a reader
*/
@Test(timeout = 30000)
public void testUnfinishedBlockRead() throws IOException {
// create a new file in the root, write data, do no close
Path file1 = new Path("/unfinished-block");
FSDataOutputStream stm = TestFileCreation.createFile(fileSystem, file1, 1);
// write partial block and sync
int partialBlockSize = blockSize / 2;
writeFileAndSync(stm, partialBlockSize);
// Make sure a client can read it before it is closed
checkCanRead(fileSystem, file1, partialBlockSize);
stm.close();
}
19
Source : TestFileConcurrentReader.java
with Apache License 2.0
from NJUJYB
/**
* test case: if the BlockSender decides there is only one packet to send,
* the previous computation of the pktSize based on transferToAllowed
* would result in too small a buffer to do the buffer-copy needed
* for partial chunks.
*/
@Test(timeout = 30000)
public void testUnfinishedBlockPacketBufferOverrun() throws IOException {
// check that / exists
Path path = new Path("/");
System.out.println("Path : \"" + path.toString() + "\"");
// create a new file in the root, write data, do no close
Path file1 = new Path("/unfinished-block");
final FSDataOutputStream stm = TestFileCreation.createFile(fileSystem, file1, 1);
// write partial block and sync
final int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
final int partialBlockSize = bytesPerChecksum - 1;
writeFileAndSync(stm, partialBlockSize);
// Make sure a client can read it before it is closed
checkCanRead(fileSystem, file1, partialBlockSize);
stm.close();
}
19
Source : TestFileConcurrentReader.java
with Apache License 2.0
from NJUJYB
private void writeFileAndSync(FSDataOutputStream stm, int size) throws IOException {
byte[] buffer = DFSTestUtil.generateSequentialBytes(0, size);
stm.write(buffer, 0, size);
stm.hflush();
}
19
Source : TestFileAppend4.java
with Apache License 2.0
from NJUJYB
/* File Append tests for HDFS-200 & HDFS-142, specifically focused on:
* using append()/sync() to recover block information
*/
public class TestFileAppend4 {
static final Log LOG = LogFactory.getLog(TestFileAppend4.class);
static final long BLOCK_SIZE = 1024;
// don't align on bytes/checksum
static final long BBW_SIZE = 500;
static final Object[] NO_ARGS = new Object[] {};
Configuration conf;
MiniDFSCluster cluster;
Path file1;
FSDataOutputStream stm;
final boolean simulatedStorage = false;
{
((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
@Before
public void setUp() throws Exception {
this.conf = new Configuration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
// lower heartbeat interval for fast recognition of DN death
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
// handle under-replicated blocks quickly (for replication asserts)
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
// handle failures in the DFSClient pipeline quickly
// (for cluster.shutdown(); fs.close() idiom)
conf.setInt("ipc.client.connect.max.retries", 1);
}
/*
* Recover file.
* Try and open file in append mode.
* Doing this, we get hold of the file that the crashed writer
* was writing to. Once we have it, close it. This will
* allow a subsequent reader to see up to the last sync.
* NOTE: This is the same algorithm that HBase uses for file recovery
* @param fs
* @throws Exception
*/
private void recoverFile(final FileSystem fs) throws Exception {
LOG.info("Recovering File Lease");
// set the soft limit to be 1 second so that the
// namenode triggers lease recovery upon append request
cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
// Trying recovery
int tries = 60;
boolean recovered = false;
FSDataOutputStream out = null;
while (!recovered && tries-- > 0) {
try {
out = fs.append(file1);
LOG.info("Successfully opened for append");
recovered = true;
} catch (IOException e) {
LOG.info("Failed open for append, waiting on lease recovery");
try {
Thread.sleep(1000);
} catch (InterruptedException ex) {
// ignore it and try again
}
}
}
if (out != null) {
out.close();
}
if (!recovered) {
fail("Recovery should take < 1 min");
}
LOG.info("Past out lease recovery");
}
/**
* Test case that stops a writer after finalizing a block but
* before calling completeFile, and then tries to recover
* the lease from another thread.
*/
@Test(timeout = 60000)
public void testRecoverFinalizedBlock() throws Throwable {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
try {
cluster.waitActive();
NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
NamenodeProtocols spyNN = spy(preSpyNN);
// Delay completeFile
GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), (ExtendedBlock) anyObject(), anyLong());
DFSClient client = new DFSClient(null, spyNN, conf, null);
file1 = new Path("/testRecoverFinalized");
final OutputStream stm = client.create("/testRecoverFinalized", true);
// write 1/2 block
AppendTestUtil.write(stm, 0, 4096);
final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
Thread t = new Thread() {
@Override
public void run() {
try {
stm.close();
} catch (Throwable t) {
err.set(t);
}
}
};
t.start();
LOG.info("Waiting for close to get to latch...");
delayer.waitForCall();
// At this point, the block is finalized on the DNs, but the file
// has not been completed in the NN.
// Lose the leases
LOG.info("Killing lease checker");
client.getLeaseRenewer().interruptAndJoin();
FileSystem fs1 = cluster.getFileSystem();
FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
LOG.info("Recovering file");
recoverFile(fs2);
LOG.info("Telling close to proceed.");
delayer.proceed();
LOG.info("Waiting for close to finish.");
t.join();
LOG.info("Close finished.");
// We expect that close will get a "File is not open"
// error.
Throwable thrownByClose = err.get();
assertNotNull(thrownByClose);
assertTrue(thrownByClose instanceof IOException);
if (!thrownByClose.getMessage().contains("No lease on /testRecoverFinalized"))
throw thrownByClose;
} finally {
cluster.shutdown();
}
}
/**
* Test case that stops a writer after finalizing a block but
* before calling completeFile, recovers a file from another writer,
* starts writing from that writer, and then has the old lease holder
* call completeFile
*/
@Test(timeout = 60000)
public void testCompleteOtherLeaseHoldersFile() throws Throwable {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
try {
cluster.waitActive();
NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
NamenodeProtocols spyNN = spy(preSpyNN);
// Delay completeFile
GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), (ExtendedBlock) anyObject(), anyLong());
DFSClient client = new DFSClient(null, spyNN, conf, null);
file1 = new Path("/testCompleteOtherLease");
final OutputStream stm = client.create("/testCompleteOtherLease", true);
// write 1/2 block
AppendTestUtil.write(stm, 0, 4096);
final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
Thread t = new Thread() {
@Override
public void run() {
try {
stm.close();
} catch (Throwable t) {
err.set(t);
}
}
};
t.start();
LOG.info("Waiting for close to get to latch...");
delayer.waitForCall();
// At this point, the block is finalized on the DNs, but the file
// has not been completed in the NN.
// Lose the leases
LOG.info("Killing lease checker");
client.getLeaseRenewer().interruptAndJoin();
FileSystem fs1 = cluster.getFileSystem();
FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
LOG.info("Recovering file");
recoverFile(fs2);
LOG.info("Opening file for append from new fs");
FSDataOutputStream appenderStream = fs2.append(file1);
LOG.info("Writing some data from new appender");
AppendTestUtil.write(appenderStream, 0, 4096);
LOG.info("Telling old close to proceed.");
delayer.proceed();
LOG.info("Waiting for close to finish.");
t.join();
LOG.info("Close finished.");
// We expect that close will get a "Lease mismatch"
// error.
Throwable thrownByClose = err.get();
assertNotNull(thrownByClose);
assertTrue(thrownByClose instanceof IOException);
if (!thrownByClose.getMessage().contains("Lease mismatch"))
throw thrownByClose;
// The appender should be able to close properly
appenderStream.close();
} finally {
cluster.shutdown();
}
}
/**
* Test the update of NeededReplications for the appended block
*/
@Test(timeout = 60000)
public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
DistributedFileSystem fileSystem = null;
try {
// create a file.
fileSystem = cluster.getFileSystem();
Path f = new Path("/testAppend");
FSDataOutputStream create = fileSystem.create(f, (short) 2);
create.write("/testAppend".getBytes());
create.close();
// Append to the file.
FSDataOutputStream append = fileSystem.append(f);
append.write("/testAppend".getBytes());
append.close();
// Start a new datanode
cluster.startDataNodes(conf, 1, true, null, null);
// Check for replications
DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
} finally {
if (null != fileSystem) {
fileSystem.close();
}
cluster.shutdown();
}
}
/**
* Test that an append with no locations fails with an exception
* showing insufficient locations.
*/
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
Configuration conf = new Configuration();
// lower heartbeat interval for fast recognition of DN
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
DistributedFileSystem fileSystem = null;
try {
// create a file with replication 3
fileSystem = cluster.getFileSystem();
Path f = new Path("/testAppend");
FSDataOutputStream create = fileSystem.create(f, (short) 2);
create.write("/testAppend".getBytes());
create.close();
// Check for replications
DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
// Shut down all DNs that have the last block location for the file
LocatedBlocks lbs = fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
List<DataNode> dnsOfCluster = cluster.getDataNodes();
DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
for (DataNode dn : dnsOfCluster) {
for (DatanodeInfo loc : dnsWithLocations) {
if (dn.getDatanodeId().equals(loc)) {
dn.shutdown();
DFSTestUtil.waitForDatanodeDeath(dn);
}
}
}
// Wait till 0 replication is recognized
DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
// Append to the file, at this state there are 3 live DNs but none of them
// have the block.
try {
fileSystem.append(f);
fail("Append should fail because insufficient locations");
} catch (IOException e) {
LOG.info("Expected exception: ", e);
}
FSDirectory dir = cluster.getNamesystem().getFSDirectory();
final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
replacedertTrue("File should remain closed", !inode.isUnderConstruction());
} finally {
if (null != fileSystem) {
fileSystem.close();
}
cluster.shutdown();
}
}
}
19
Source : TestFileAppend4.java
with Apache License 2.0
from NJUJYB
/**
* Test the update of NeededReplications for the appended block
*/
@Test(timeout = 60000)
public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
DistributedFileSystem fileSystem = null;
try {
// create a file.
fileSystem = cluster.getFileSystem();
Path f = new Path("/testAppend");
FSDataOutputStream create = fileSystem.create(f, (short) 2);
create.write("/testAppend".getBytes());
create.close();
// Append to the file.
FSDataOutputStream append = fileSystem.append(f);
append.write("/testAppend".getBytes());
append.close();
// Start a new datanode
cluster.startDataNodes(conf, 1, true, null, null);
// Check for replications
DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
} finally {
if (null != fileSystem) {
fileSystem.close();
}
cluster.shutdown();
}
}
19
Source : TestFileAppend3.java
with Apache License 2.0
from NJUJYB
/**
* TC2: Append on non-block boundary.
* @throws IOException an exception might be thrown
*/
@Test
public void testTC2() throws Exception {
final Path p = new Path("/TC2/foo");
System.out.println("p=" + p);
// a. Create file with one and a half block of data. Close file.
final int len1 = (int) (BLOCK_SIZE + BLOCK_SIZE / 2);
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
AppendTestUtil.write(out, 0, len1);
out.close();
}
AppendTestUtil.check(fs, p, len1);
// Reopen file to append quarter block of data. Close file.
final int len2 = (int) BLOCK_SIZE / 4;
{
FSDataOutputStream out = fs.append(p);
AppendTestUtil.write(out, len1, len2);
out.close();
}
// b. Reopen file and read 1.75 blocks of data. Close file.
AppendTestUtil.check(fs, p, len1 + len2);
}
19
Source : TestFileAppend3.java
with Apache License 2.0
from NJUJYB
/**
* Append to a partial CRC chunk and
* the first write does not fill up the partial CRC trunk
* *
* @throws IOException
*/
@Test
public void testAppendToPartialChunk() throws IOException {
final Path p = new Path("/partialChunk/foo");
final int fileLen = 513;
System.out.println("p=" + p);
byte[] fileContents = AppendTestUtil.initBuffer(fileLen);
// create a new file.
FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, 1);
// create 1 byte file
stm.write(fileContents, 0, 1);
stm.close();
System.out.println("Wrote 1 byte and closed the file " + p);
// append to file
stm = fs.append(p);
// Append to a partial CRC trunk
stm.write(fileContents, 1, 1);
stm.hflush();
// The partial CRC trunk is not full yet and close the file
stm.close();
System.out.println("Append 1 byte and closed the file " + p);
// write the remainder of the file
stm = fs.append(p);
// ensure getPos is set to reflect existing size of the file
assertEquals(2, stm.getPos());
// append to a partial CRC trunk
stm.write(fileContents, 2, 1);
// The partial chunk is not full yet, force to send a packet to DN
stm.hflush();
System.out.println("Append and flush 1 byte");
// The partial chunk is not full yet, force to send another packet to DN
stm.write(fileContents, 3, 2);
stm.hflush();
System.out.println("Append and flush 2 byte");
// fill up the partial chunk and close the file
stm.write(fileContents, 5, fileLen - 5);
stm.close();
System.out.println("Flush 508 byte and closed the file " + p);
// verify that entire file is good
AppendTestUtil.checkFullFile(fs, p, fileLen, fileContents, "Failed to append to a partial chunk");
}
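A small hedged sketch of the append behaviour these TC tests rely on, with a hypothetical path and an assumed append-enabled FileSystem fs: reopening a file with append() positions the stream at the current end of file, which is what lets the partial-chunk test above assert getPos() == 2.
static void appendPositioning(FileSystem fs) throws IOException {
  Path p = new Path("/tmp/append-example"); // hypothetical path
  FSDataOutputStream create = fs.create(p);
  create.write(new byte[]{0, 1});           // two bytes in the initial file
  create.close();
  FSDataOutputStream append = fs.append(p);
  long pos = append.getPos();               // reflects the existing file length, 2 here
  append.write(3);
  append.close();
}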
19
Source : TestFileAppend3.java
with Apache License 2.0
from NJUJYB
/**
* TC1: Append on block boundary.
* @throws IOException an exception might be thrown
*/
@Test
public void testTC1() throws Exception {
final Path p = new Path("/TC1/foo");
System.out.println("p=" + p);
// a. Create file and write one block of data. Close file.
final int len1 = (int) BLOCK_SIZE;
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
AppendTestUtil.write(out, 0, len1);
out.close();
}
// Reopen file to append. Append half block of data. Close file.
final int len2 = (int) BLOCK_SIZE / 2;
{
FSDataOutputStream out = fs.append(p);
AppendTestUtil.write(out, len1, len2);
out.close();
}
// b. Reopen file and read 1.5 blocks worth of data. Close file.
AppendTestUtil.check(fs, p, len1 + len2);
}
19
Source : TestFileAppend3.java
with Apache License 2.0
from NJUJYB
/**
* TC12: Append to partial CRC chunk
* @throws IOException an exception might be thrown
*/
@Test
public void testTC12() throws Exception {
final Path p = new Path("/TC12/foo");
System.out.println("p=" + p);
// a. Create file with a block size of 64KB
// and a default io.bytes.per.checksum of 512 bytes.
// Write 25687 bytes of data. Close file.
final int len1 = 25687;
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
AppendTestUtil.write(out, 0, len1);
out.close();
}
// b. Reopen file in "append" mode. Append another 5877 bytes of data. Close file.
final int len2 = 5877;
{
FSDataOutputStream out = fs.append(p);
AppendTestUtil.write(out, len1, len2);
out.close();
}
// c. Reopen file and read 25687+5877 bytes of data from file. Close file.
AppendTestUtil.check(fs, p, len1 + len2);
}
19
Source : TestFileAppend3.java
with Apache License 2.0
from NJUJYB
// Do small appends.
void doSmallAppends(Path file, DistributedFileSystem fs, int iterations) throws IOException {
for (int i = 0; i < iterations; i++) {
FSDataOutputStream stm;
try {
stm = fs.append(file);
} catch (IOException e) {
// If another thread is already appending, skip this time.
continue;
}
// Failure in write or close will be terminal.
AppendTestUtil.write(stm, 0, 123);
stm.close();
}
}
19
Source : TestFileAppend.java
with Apache License 2.0
from NJUJYB
//
// writes to file but does not close it
//
private void writeFile(FSDataOutputStream stm) throws IOException {
byte[] buffer = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
stm.write(buffer);
}
19
Source : TestDFSOutputStream.java
with Apache License 2.0
from NJUJYB
/**
* The close() method of DFSOutputStream should never throw the same exception
* twice. See HDFS-5335 for details.
*/
@Test
public void testCloseTwice() throws IOException {
DistributedFileSystem fs = cluster.getFileSystem();
FSDataOutputStream os = fs.create(new Path("/test"));
DFSOutputStream dos = (DFSOutputStream) Whitebox.getInternalState(os, "wrappedStream");
@SuppressWarnings("unchecked")
AtomicReference<IOException> ex = (AtomicReference<IOException>) Whitebox.getInternalState(dos, "lastException");
Assert.assertEquals(null, ex.get());
dos.close();
IOException dummy = new IOException("dummy");
ex.set(dummy);
try {
dos.close();
} catch (IOException e) {
Assert.assertEquals(e, dummy);
}
Assert.assertEquals(null, ex.get());
dos.close();
}
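A brief hedged sketch of the behaviour the test pins down, assuming a DistributedFileSystem fs and a hypothetical path: after the HDFS-5335 fix, a second close() on the stream is a no-op instead of rethrowing the earlier exception, so defensive double-closing is safe.
static void closeTwice(FileSystem fs) throws IOException {
  FSDataOutputStream os = fs.create(new Path("/tmp/close-twice")); // hypothetical path
  try {
    os.write(1);
  } finally {
    os.close(); // first close completes the file
  }
  os.close();   // second close is expected to be a silent no-op
}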
19
Source : TestChecksumFileSystem.java
with Apache License 2.0
from NJUJYB
@Test
public void testCorruptedChecksum() throws Exception {
Path testPath = new Path(TEST_ROOT_DIR, "testCorruptChecksum");
Path checksumPath = localFs.getChecksumFile(testPath);
// write a file to generate checksum
FSDataOutputStream out = localFs.create(testPath, true);
out.write("testing 1 2 3".getBytes());
out.close();
assertTrue(localFs.exists(checksumPath));
FileStatus stat = localFs.getFileStatus(checksumPath);
// alter file directly so checksum is invalid
out = localFs.getRawFileSystem().create(testPath, true);
out.write("testing stale checksum".getBytes());
out.close();
assertTrue(localFs.exists(checksumPath));
// checksum didn't change on disk
assertEquals(stat, localFs.getFileStatus(checksumPath));
Exception e = null;
try {
localFs.setVerifyChecksum(true);
readFile(localFs, testPath, 1024);
} catch (ChecksumException ce) {
e = ce;
} finally {
replacedertNotNull("got checksum error", e);
}
localFs.setVerifyChecksum(false);
String str = readFile(localFs, testPath, 1024);
replacedertEquals("testing stale checksum", str);
}
19
Source : TestChecksumFileSystem.java
with Apache License 2.0
from NJUJYB
@Test
public void testVerifyChecksum() throws Exception {
Path testPath = new Path(TEST_ROOT_DIR, "testPath");
Path testPath11 = new Path(TEST_ROOT_DIR, "testPath11");
FSDataOutputStream fout = localFs.create(testPath);
fout.write("testing".getBytes());
fout.close();
fout = localFs.create(testPath11);
fout.write("testing you".getBytes());
fout.close();
// Exercise some boundary cases - a divisor of the chunk size
// the chunk size, 2x chunk size, and +/-1 around these.
readFile(localFs, testPath, 128);
readFile(localFs, testPath, 511);
readFile(localFs, testPath, 512);
readFile(localFs, testPath, 513);
readFile(localFs, testPath, 1023);
readFile(localFs, testPath, 1024);
readFile(localFs, testPath, 1025);
localFs.delete(localFs.getChecksumFile(testPath), true);
replacedertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));
// copying the wrong checksum file
FileUtil.copy(localFs, localFs.getChecksumFile(testPath11), localFs, localFs.getChecksumFile(testPath), false, true, localFs.getConf());
replacedertTrue("checksum exists", localFs.exists(localFs.getChecksumFile(testPath)));
boolean errorRead = false;
try {
readFile(localFs, testPath, 1024);
} catch (ChecksumException ie) {
errorRead = true;
}
replacedertTrue("error reading", errorRead);
// now setting verify false, the read should succeed
localFs.setVerifyChecksum(false);
String str = readFile(localFs, testPath, 1024).toString();
replacedertTrue("read", "testing".equals(str));
}
19
Source : TestChecksumFileSystem.java
with Apache License 2.0
from NJUJYB
@Test
public void testMultiChunkFile() throws Exception {
Path testPath = new Path(TEST_ROOT_DIR, "testMultiChunk");
FSDataOutputStream fout = localFs.create(testPath);
for (int i = 0; i < 1000; i++) {
fout.write(("testing" + i).getBytes());
}
fout.close();
// Exercise some boundary cases - a divisor of the chunk size
// the chunk size, 2x chunk size, and +/-1 around these.
readFile(localFs, testPath, 128);
readFile(localFs, testPath, 511);
readFile(localFs, testPath, 512);
readFile(localFs, testPath, 513);
readFile(localFs, testPath, 1023);
readFile(localFs, testPath, 1024);
readFile(localFs, testPath, 1025);
}