org.apache.hadoop.io.IOUtils.closeStream()

These are examples of the Java API org.apache.hadoop.io.IOUtils.closeStream(), taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

212 Examples
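Before the project examples, here is a minimal sketch of the pattern most of them follow. IOUtils.closeStream() is a null-safe close that swallows any IOException, so it is typically called in a finally block on a stream variable that may never have been assigned. The file path and default Configuration below are illustrative placeholders, not taken from any of the projects listed.

import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class CloseStreamExample {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // The stream may stay null if open() throws, so the close must be null-safe.
        InputStream in = null;
        try {
            // Hypothetical path; substitute any file on the configured FileSystem.
            in = fs.open(new Path("/tmp/example.txt"));
            IOUtils.copyBytes(in, System.out, 4096, false);
        } finally {
            // Null-safe close; any IOException from close() is ignored.
            IOUtils.closeStream(in);
        }
    }
}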

19 Source : JsonSerDeser.java
with Apache License 2.0
from NJUJYB

/**
 * Convert from a JSON file
 * @param resource input file
 * @return the parsed JSON
 * @throws IOException IO problems
 * @throws JsonMappingException failure to map from the JSON to this class
 */
@SuppressWarnings({ "IOResourceOpenedButNotSafelyClosed" })
public synchronized T fromResource(String resource) throws IOException, JsonParseException, JsonMappingException {
    InputStream resStream = null;
    try {
        resStream = this.getClass().getResourceAsStream(resource);
        if (resStream == null) {
            throw new FileNotFoundException(resource);
        }
        return mapper.readValue(resStream, classType);
    } catch (IOException e) {
        LOG.error("Exception while parsing json resource {}: {}", resource, e);
        throw e;
    } finally {
        IOUtils.closeStream(resStream);
    }
}

19 Source : TestSwiftFileSystemExtendedContract.java
with Apache License 2.0
from NJUJYB

@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testWriteReadFile() throws Exception {
    final Path f = new Path("/test/test");
    final FSDataOutputStream fsDataOutputStream = fs.create(f);
    final String message = "Test string";
    fsDataOutputStream.write(message.getBytes());
    fsDataOutputStream.close();
    assertExists("created file", f);
    FSDataInputStream open = null;
    try {
        open = fs.open(f);
        final byte[] bytes = new byte[512];
        final int read = open.read(bytes);
        final byte[] buffer = new byte[read];
        System.arraycopy(bytes, 0, buffer, 0, read);
        assertEquals(message, new String(buffer));
    } finally {
        fs.delete(f, false);
        IOUtils.closeStream(open);
    }
}

19 Source : TestSeek.java
with Apache License 2.0
from NJUJYB

@After
public void cleanFile() {
    IOUtils.closeStream(instream);
    instream = null;
}

19 Source : TestThrottledInputStream.java
with Apache License 2.0
from NJUJYB

private void writeToFile(File tmpFile, long sizeInKB) throws IOException {
    OutputStream out = new FileOutputStream(tmpFile);
    try {
        byte[] buffer = new byte[1024];
        for (long index = 0; index < sizeInKB; index++) {
            out.write(buffer);
        }
    } finally {
        IOUtils.closeStream(out);
    }
}

19 Source : TestFileBasedCopyListing.java
with Apache License 2.0
from NJUJYB

private void checkResult(Path listFile, int count) throws IOException {
    if (count == 0) {
        return;
    }
    int recCount = 0;
    SequenceFile.Reader reader = new SequenceFile.Reader(config, SequenceFile.Reader.file(listFile));
    try {
        Text relPath = new Text();
        CopyListingFileStatus fileStatus = new CopyListingFileStatus();
        while (reader.next(relPath, fileStatus)) {
            if (fileStatus.isDirectory() && relPath.toString().equals("")) {
                // ignore root with empty relPath, which is an entry to be
                // used for preserving root attributes etc.
                continue;
            }
            Assert.assertEquals(fileStatus.getPath().toUri().getPath(), map.get(relPath.toString()));
            recCount++;
        }
    } finally {
        IOUtils.closeStream(reader);
    }
    Assert.assertEquals(recCount, count);
}

19 Source : UniformSizeInputFormat.java
with Apache License 2.0
from NJUJYB

private List<InputSplit> getSplits(Configuration configuration, int numSplits, long totalSizeBytes) throws IOException {
    List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
    long nBytesPerSplit = (long) Math.ceil(totalSizeBytes * 1.0 / numSplits);
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();
    long currentSplitSize = 0;
    long lastSplitStart = 0;
    long lastPosition = 0;
    final Path listingFilePath = getListingFilePath(configuration);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Average bytes per map: " + nBytesPerSplit + ", Number of maps: " + numSplits + ", total size: " + totalSizeBytes);
    }
    SequenceFile.Reader reader = null;
    try {
        reader = getListingFileReader(configuration);
        while (reader.next(srcRelPath, srcFileStatus)) {
            // If adding the current file would cause the bytes per map to
            // exceed the limit, add the current file to a new split
            if (currentSplitSize + srcFileStatus.getLen() > nBytesPerSplit && lastPosition != 0) {
                FileSplit split = new FileSplit(listingFilePath, lastSplitStart, lastPosition - lastSplitStart, null);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Creating split : " + split + ", bytes in split: " + currentSplitSize);
                }
                splits.add(split);
                lastSplitStart = lastPosition;
                currentSplitSize = 0;
            }
            currentSplitSize += srcFileStatus.getLen();
            lastPosition = reader.getPosition();
        }
        if (lastPosition > lastSplitStart) {
            FileSplit split = new FileSplit(listingFilePath, lastSplitStart, lastPosition - lastSplitStart, null);
            if (LOG.isDebugEnabled()) {
                LOG.info("Creating split : " + split + ", bytes in split: " + currentSplitSize);
            }
            splits.add(split);
        }
    } finally {
        IOUtils.closeStream(reader);
    }
    return splits;
}

19 Source : TestBestEffortLongFile.java
with Apache License 2.0
from NJUJYB

@Test
public void testGetSet() throws IOException {
    BestEffortLongFile f = new BestEffortLongFile(FILE, 12345L);
    try {
        // Before the file exists, should return default.
        assertEquals(12345L, f.get());
        // And first access should open it.
        assertTrue(FILE.exists());
        Random r = new Random();
        for (int i = 0; i < 100; i++) {
            long newVal = r.nextLong();
            // Changing the value should be reflected in the next get() call.
            f.set(newVal);
            assertEquals(newVal, f.get());
            // And should be reflected in a new instance (ie it actually got
            // written to the file)
            BestEffortLongFile f2 = new BestEffortLongFile(FILE, 999L);
            try {
                assertEquals(newVal, f2.get());
            } finally {
                IOUtils.closeStream(f2);
            }
        }
    } finally {
        IOUtils.closeStream(f);
    }
}

19 Source : TestFileAppendRestart.java
with Apache License 2.0
from NJUJYB

/**
 * Test to append to the file, when one of datanode in the existing pipeline
 * is down.
 */
@Test
public void testAppendWithPipelineRecovery() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    FSDataOutputStream out = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true).manageNameDfsDirs(true).numDataNodes(4).racks(new String[] { "/rack1", "/rack1", "/rack2", "/rack2" }).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        Path path = new Path("/test1");
        out = fs.create(path, true, BLOCK_SIZE, (short) 3, BLOCK_SIZE);
        AppendTestUtil.write(out, 0, 1024);
        out.close();
        cluster.stopDataNode(3);
        out = fs.append(path);
        AppendTestUtil.write(out, 1024, 1024);
        out.close();
        cluster.restartNameNode(true);
        AppendTestUtil.check(fs, path, 2048);
    } finally {
        IOUtils.closeStream(out);
        if (null != cluster) {
            cluster.shutdown();
        }
    }
}

19 Source : TestFileAppendRestart.java
with Apache License 2.0
from NJUJYB

/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 */
@Test
public void testAppendRestart() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // Turn off persistent IPC, so that the DFSClient can survive NN restart
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
    MiniDFSCluster cluster = null;
    FSDataOutputStream stream = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        FileSystem fs = cluster.getFileSystem();
        File editLog = new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0), NNStorage.getInProgressEditsFileName(1));
        EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
        Path p1 = new Path("/block-boundaries");
        writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);
        counts = FSImageTestUtil.countEditLogOpTypes(editLog);
        // OP_ADD to create file
        // OP_ADD_BLOCK for first block
        // OP_CLOSE to close file
        // OP_ADD to reopen file
        // OP_ADD_BLOCK for second block
        // OP_CLOSE to close file
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_ADD).held);
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_CLOSE).held);
        Path p2 = new Path("/not-block-boundaries");
        writeAndAppend(fs, p2, BLOCK_SIZE / 2, BLOCK_SIZE);
        counts = FSImageTestUtil.countEditLogOpTypes(editLog);
        // OP_ADD to create file
        // OP_ADD_BLOCK for first block
        // OP_CLOSE to close file
        // OP_ADD to re-establish the lease
        // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
        // OP_ADD_BLOCK at the start of the second block
        // OP_CLOSE to close file
        // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
        // in addition to the ones above
        assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_ADD).held);
        assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
        assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
        assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_CLOSE).held);
        cluster.restartNameNode();
        AppendTestUtil.check(fs, p1, 2 * BLOCK_SIZE);
        AppendTestUtil.check(fs, p2, 3 * BLOCK_SIZE / 2);
    } finally {
        IOUtils.closeStream(stream);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

19 Source : FSImageTestUtil.java
with Apache License 2.0
from NJUJYB

/**
 * @param editLog a path of an edit log file
 * @return the count of each type of operation in the log file
 * @throws Exception if there is an error reading it
 */
public static EnumMap<FSEditLogOpCodes, Holder<Integer>> countEditLogOpTypes(File editLog) throws Exception {
    EditLogInputStream elis = new EditLogFileInputStream(editLog);
    try {
        return countEditLogOpTypes(elis);
    } finally {
        IOUtils.closeStream(elis);
    }
}

19 Source : AtomicFileOutputStream.java
with Apache License 2.0
from NJUJYB

@Override
public void close() throws IOException {
    boolean triedToClose = false, success = false;
    try {
        flush();
        ((FileOutputStream) out).getChannel().force(true);
        triedToClose = true;
        super.close();
        success = true;
    } finally {
        if (success) {
            boolean renamed = tmpFile.renameTo(origFile);
            if (!renamed) {
                // On windows, renameTo does not replace.
                if (origFile.exists() && !origFile.delete()) {
                    throw new IOException("Could not delete original file " + origFile);
                }
                try {
                    NativeIO.renameTo(tmpFile, origFile);
                } catch (NativeIOException e) {
                    throw new IOException("Could not rename temporary file " + tmpFile + " to " + origFile + " due to failure in native rename. " + e.toString());
                }
            }
        } else {
            if (!triedToClose) {
                // If we failed when flushing, try to close it to not leak an FD
                IOUtils.closeStream(out);
            }
            // close wasn't successful, try to delete the tmp file
            if (!tmpFile.delete()) {
                LOG.warn("Unable to delete tmp file " + tmpFile);
            }
        }
    }
}

19 Source : EditLogFileInputStream.java
with Apache License 2.0
from NJUJYB

static FSEditLogLoader.EditLogValidation validateEditLog(File file) throws IOException {
    EditLogFileInputStream in;
    try {
        in = new EditLogFileInputStream(file);
        // causes us to read the header
        in.getVersion(true);
    } catch (LogHeaderCorruptException e) {
        // If the header is malformed or the wrong value, this indicates a corruption
        LOG.warn("Log file " + file + " has no valid header", e);
        return new FSEditLogLoader.EditLogValidation(0, HdfsConstants.INVALID_TXID, true);
    }
    try {
        return FSEditLogLoader.validateEditLog(in);
    } finally {
        IOUtils.closeStream(in);
    }
}

19 Source : ReplicaOutputStreams.java
with Apache License 2.0
from NJUJYB

@Override
public void close() {
    IOUtils.closeStream(dataOut);
    IOUtils.closeStream(checksumOut);
}

19 Source : ReplicaInputStreams.java
with Apache License 2.0
from NJUJYB

@Override
public void close() {
    IOUtils.closeStream(dataIn);
    IOUtils.closeStream(checksumIn);
}

19 Source : NameNodeConnector.java
with Apache License 2.0
from NJUJYB

@Override
public void close() {
    keyManager.close();
    // close the output file
    IOUtils.closeStream(out);
    if (fs != null) {
        try {
            fs.delete(idPath, true);
        } catch (IOException ioe) {
            LOG.warn("Failed to delete " + idPath, ioe);
        }
    }
}

19 Source : Journal.java
with Apache License 2.0
from NJUJYB

/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
    IOUtils.closeStream(committedTxnId);
    File currentDir = storage.getSingularStorageDir().getCurrentDir();
    this.lastPromisedEpoch = new PersistentLongFile(new File(currentDir, LAST_PROMISED_FILENAME), 0);
    this.lastWriterEpoch = new PersistentLongFile(new File(currentDir, LAST_WRITER_EPOCH), 0);
    this.committedTxnId = new BestEffortLongFile(new File(currentDir, COMMITTED_TXID_FILENAME), HdfsConstants.INVALID_TXID);
}

19 Source : Journal.java
with Apache License 2.0
from NJUJYB

/**
 * Unlock and release resources.
 */
// Closeable
@Override
public void close() throws IOException {
    storage.close();
    IOUtils.closeStream(committedTxnId);
    IOUtils.closeStream(curSegment);
}

19 Source : AbstractContractSeekTest.java
with Apache License 2.0
from NJUJYB

@Override
public void teardown() throws Exception {
    IOUtils.closeStream(instream);
    instream = null;
    super.teardown();
}

19 Source : TestConfiguration.java
with Apache License 2.0
from NJUJYB

/**
 * Tests use of multi-byte characters in property names and values.  This test
 * round-trips multi-byte string literals through saving and loading of config
 * and asserts that the same values were read.
 */
public void testMultiByteCharacters() throws IOException {
    String priorDefaultEncoding = System.getProperty("file.encoding");
    try {
        System.setProperty("file.encoding", "US-ASCII");
        String name = "multi_byte_\u611b_name";
        String value = "multi_byte_\u0641_value";
        out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(CONFIG_MULTI_BYTE), "UTF-8"));
        startConfig();
        declareProperty(name, value, value);
        endConfig();
        Configuration conf = new Configuration(false);
        conf.addResource(new Path(CONFIG_MULTI_BYTE));
        assertEquals(value, conf.get(name));
        FileOutputStream fos = new FileOutputStream(CONFIG_MULTI_BYTE_SAVED);
        try {
            conf.writeXml(fos);
        } finally {
            IOUtils.closeStream(fos);
        }
        conf = new Configuration(false);
        conf.addResource(new Path(CONFIG_MULTI_BYTE_SAVED));
        assertEquals(value, conf.get(name));
    } finally {
        System.setProperty("file.encoding", priorDefaultEncoding);
    }
}

19 Source : CommandWithDestination.java
with Apache License 2.0
from NJUJYB

/**
 * Copies the source file to the target.
 * @param src item to copy
 * @param target where to copy the item
 * @throws IOException if copy fails
 */
protected void copyFileToTarget(PathData src, PathData target) throws IOException {
    final boolean preserveRawXattrs = checkPathsForReservedRaw(src.path, target.path);
    src.fs.setVerifyChecksum(verifyChecksum);
    InputStream in = null;
    try {
        in = src.fs.open(src.path);
        copyStreamToTarget(in, target);
        preserveAttributes(src, target, preserveRawXattrs);
    } finally {
        IOUtils.closeStream(in);
    }
}

19 Source : TestFileAppendRestart.java
with Apache License 2.0
from naver

/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 */
@Test
public void testAppendRestart() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // Turn off persistent IPC, so that the DFSClient can survive NN restart
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
    MiniDFSCluster cluster = null;
    FSDataOutputStream stream = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        FileSystem fs = cluster.getFileSystem();
        File editLog = new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0), NNStorage.getInProgressEditsFileName(1));
        EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
        Path p1 = new Path("/block-boundaries");
        writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);
        counts = FSImageTestUtil.countEditLogOpTypes(editLog);
        // OP_ADD to create file
        // OP_ADD_BLOCK for first block
        // OP_CLOSE to close file
        // OP_APPEND to reopen file
        // OP_ADD_BLOCK for second block
        // OP_CLOSE to close file
        assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_ADD).held);
        assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_APPEND).held);
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_CLOSE).held);
        Path p2 = new Path("/not-block-boundaries");
        writeAndAppend(fs, p2, BLOCK_SIZE / 2, BLOCK_SIZE);
        counts = FSImageTestUtil.countEditLogOpTypes(editLog);
        // OP_ADD to create file
        // OP_ADD_BLOCK for first block
        // OP_CLOSE to close file
        // OP_APPEND to re-establish the lease
        // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
        // OP_ADD_BLOCK at the start of the second block
        // OP_CLOSE to close file
        // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
        // in addition to the ones above
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_ADD).held);
        assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_APPEND).held);
        assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
        assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
        assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_CLOSE).held);
        cluster.restartNameNode();
        AppendTestUtil.check(fs, p1, 2 * BLOCK_SIZE);
        AppendTestUtil.check(fs, p2, 3 * BLOCK_SIZE / 2);
    } finally {
        IOUtils.closeStream(stream);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

19 Source : ChunkRecordReader.java
with Apache License 2.0
from greenplum-db

/**
 * Translates the FSDataInputStream into a DFSInputStream.
 */
private DFSInputStream getInputStream() throws IncompatibleInputStreamException {
    InputStream inputStream = fileIn.getWrappedStream();
    if (inputStream instanceof DFSInputStream) {
        return (DFSInputStream) inputStream;
    } else {
        IOUtils.closeStream(fileIn);
        throw new IncompatibleInputStreamException(inputStream.getClass());
    }
}

19 Source : TestAllTables.java
with Apache License 2.0
from dkhadoop

public void testMultiTableImportWithExclude() throws IOException {
    String exclude = this.tableNames.get(0);
    String[] argv = getArgv(true, new String[] { exclude });
    runImport(new ImportAllTablesTool(), argv);
    Path warehousePath = new Path(this.getWarehouseDir());
    int i = 0;
    for (String tableName : this.tableNames) {
        Path tablePath = new Path(warehousePath, tableName);
        Path filePath = new Path(tablePath, "part-m-00000");
        // dequeue the expected value for this table. This
        // list has the same order as the tableNames list.
        String expectedVal = Integer.toString(i++) + "," + this.expectedStrings.get(0);
        this.expectedStrings.remove(0);
        BufferedReader reader = null;
        if (!isOnPhysicalCluster()) {
            reader = new BufferedReader(new InputStreamReader(new FileInputStream(new File(filePath.toString()))));
        } else {
            FSDataInputStream dis;
            FileSystem dfs = FileSystem.get(getConf());
            if (tableName.equals(exclude)) {
                try {
                    dis = dfs.open(filePath);
                    assertFalse(true);
                } catch (FileNotFoundException e) {
                    // Success
                    continue;
                }
            } else {
                dis = dfs.open(filePath);
            }
            reader = new BufferedReader(new InputStreamReader(dis));
        }
        try {
            String line = reader.readLine();
            replacedertEquals("Table " + tableName + " expected a different string", expectedVal, line);
        } finally {
            IOUtils.closeStream(reader);
        }
    }
}

19 Source : TestAllTables.java
with Apache License 2.0
from aliyun

public void testMultiTableImportWithExclude() throws IOException {
    String exclude = this.tableNames.get(0);
    String[] argv = getArgv(null, new String[] { exclude });
    runImport(new ImportAllTablesTool(), argv);
    Path warehousePath = new Path(this.getWarehouseDir());
    int i = 0;
    for (String tableName : this.tableNames) {
        Path tablePath = new Path(warehousePath, tableName);
        Path filePath = new Path(tablePath, "part-m-00000");
        // dequeue the expected value for this table. This
        // list has the same order as the tableNames list.
        String expectedVal = Integer.toString(i++) + "," + this.expectedStrings.get(0);
        this.expectedStrings.remove(0);
        BufferedReader reader = null;
        if (!isOnPhysicalCluster()) {
            reader = new BufferedReader(new InputStreamReader(new FileInputStream(new File(filePath.toString()))));
        } else {
            FSDataInputStream dis;
            FileSystem dfs = FileSystem.get(getConf());
            if (tableName.equals(exclude)) {
                try {
                    dis = dfs.open(filePath);
                    assertFalse(true);
                } catch (FileNotFoundException e) {
                    // Success
                    continue;
                }
            } else {
                dis = dfs.open(filePath);
            }
            reader = new BufferedReader(new InputStreamReader(dis));
        }
        try {
            String line = reader.readLine();
            replacedertEquals("Table " + tableName + " expected a different string", expectedVal, line);
        } finally {
            IOUtils.closeStream(reader);
        }
    }
}

18 Source : FileSystemCat.java
with Apache License 2.0
from whirlys

public static void main(String[] args) throws Exception {
    String uri = "/hadoop/hello.txt";
    FileSystem fs = SysUtil.getFileSystem();
    InputStream in = null;
    try {
        in = fs.open(new Path(uri));
        IOUtils.copyBytes(in, System.out, 4096, false);
    } finally {
        IOUtils.closeStream(in);
    }
}

18 Source : CuratorService.java
with Apache License 2.0
from NJUJYB

/**
 * Close the ZK connection if it is open
 */
@Override
protected void serviceStop() throws Exception {
    IOUtils.closeStream(curator);
    super.serviceStop();
}

18 Source : TestThrottledInputStream.java
with Apache License 2.0
from NJUJYB

private long copyAndAssert(File tmpFile, File outFile, long maxBandwidth, float factor, int sleepTime, CB flag) throws IOException {
    long bandwidth;
    ThrottledInputStream in;
    long maxBPS = (long) (maxBandwidth / factor);
    if (maxBandwidth == 0) {
        in = new ThrottledInputStream(new FileInputStream(tmpFile));
    } else {
        in = new ThrottledInputStream(new FileInputStream(tmpFile), maxBPS);
    }
    OutputStream out = new FileOutputStream(outFile);
    try {
        if (flag == CB.BUFFER) {
            copyBytes(in, out, BUFF_SIZE);
        } else if (flag == CB.BUFF_OFFSET) {
            copyBytesWithOffset(in, out, BUFF_SIZE);
        } else {
            copyByteByByte(in, out);
        }
        LOG.info(in);
        bandwidth = in.getBytesPerSec();
        Assert.assertEquals(in.getTotalBytesRead(), tmpFile.length());
        Assert.assertTrue(in.getBytesPerSec() > maxBandwidth / (factor * 1.2));
        Assert.assertTrue(in.getTotalSleepTime() > sleepTime || in.getBytesPerSec() <= maxBPS);
    } finally {
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
    }
    return bandwidth;
}

18 Source : FileBasedCopyListing.java
with Apache License 2.0
from NJUJYB

private List<Path> fetchFileList(Path sourceListing) throws IOException {
    List<Path> result = new ArrayList<Path>();
    FileSystem fs = sourceListing.getFileSystem(getConf());
    BufferedReader input = null;
    try {
        input = new BufferedReader(new InputStreamReader(fs.open(sourceListing)));
        String line = input.readLine();
        while (line != null) {
            result.add(new Path(line));
            line = input.readLine();
        }
    } finally {
        IOUtils.closeStream(input);
    }
    return result;
}

18 Source : TestMultiFileSplit.java
with Apache License 2.0
from NJUJYB

public void testReadWrite() throws Exception {
    MultiFileSplit split = new MultiFileSplit(new JobConf(), new Path[] { new Path("/test/path/1"), new Path("/test/path/2") }, new long[] { 100, 200 });
    ByteArrayOutputStream bos = null;
    byte[] result = null;
    try {
        bos = new ByteArrayOutputStream();
        split.write(new DataOutputStream(bos));
        result = bos.toByteArray();
    } finally {
        IOUtils.closeStream(bos);
    }
    MultiFileSplit readSplit = new MultiFileSplit();
    ByteArrayInputStream bis = null;
    try {
        bis = new ByteArrayInputStream(result);
        readSplit.readFields(new DataInputStream(bis));
    } finally {
        IOUtils.closeStream(bis);
    }
    assertTrue(split.getLength() != 0);
    assertEquals(split.getLength(), readSplit.getLength());
    assertTrue(Arrays.equals(split.getPaths(), readSplit.getPaths()));
    assertTrue(Arrays.equals(split.getLengths(), readSplit.getLengths()));
    System.out.println(split.toString());
}

18 Source : TestReplaceDatanodeOnFailure.java
with Apache License 2.0
from NJUJYB

/**
 * Test replace datanode on failure.
 */
@Test
public void testReplaceDatanodeOnFailure() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // always replace a datanode
    ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);
    final String[] racks = new String[REPLICATION];
    Arrays.fill(racks, RACK0);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).racks(racks).numDataNodes(REPLICATION).build();
    try {
        final DistributedFileSystem fs = cluster.getFileSystem();
        final Path dir = new Path(DIR);
        final SlowWriter[] slowwriters = new SlowWriter[10];
        for (int i = 1; i <= slowwriters.length; i++) {
            // create slow writers in different speed
            slowwriters[i - 1] = new SlowWriter(fs, new Path(dir, "file" + i), i * 200L);
        }
        for (SlowWriter s : slowwriters) {
            s.start();
        }
        // Let slow writers write something.
        // Some of them are too slow and may not have started yet.
        sleepSeconds(1);
        // start new datanodes
        cluster.startDataNodes(conf, 2, true, null, new String[] { RACK1, RACK1 });
        // stop an old datanode
        cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
        // Let the slow writers write for a few more seconds.
        // Everyone should have written something.
        sleepSeconds(5);
        // check replication and interrupt.
        for (SlowWriter s : slowwriters) {
            s.checkReplication();
            s.interruptRunning();
        }
        // close files
        for (SlowWriter s : slowwriters) {
            s.joinAndClose();
        }
        // Verify the file
        LOG.info("Verify the file");
        for (int i = 0; i < slowwriters.length; i++) {
            LOG.info(slowwriters[i].filepath + ": length=" + fs.getFileStatus(slowwriters[i].filepath).getLen());
            FSDataInputStream in = null;
            try {
                in = fs.open(slowwriters[i].filepath);
                for (int j = 0, x; (x = in.read()) != -1; j++) {
                    Assert.assertEquals(j, x);
                }
            } finally {
                IOUtils.closeStream(in);
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

18 Source : TestLeaseRecovery2.java
with Apache License 2.0
from NJUJYB

/**
 * stop the cluster
 * @throws IOException
 */
@AfterClass
public static void tearDown() throws IOException {
    IOUtils.closeStream(dfs);
    if (cluster != null) {
        cluster.shutdown();
    }
}

18 Source : TestFileCreationClient.java
with Apache License 2.0
from NJUJYB

/**
 * Test lease recovery Triggered by DFSClient.
 */
@Test
public void testClientTriggeredLeaseRecovery() throws Exception {
    final int REPLICATION = 3;
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
    try {
        final FileSystem fs = cluster.getFileSystem();
        final Path dir = new Path("/wrwelkj");
        SlowWriter[] slowwriters = new SlowWriter[10];
        for (int i = 0; i < slowwriters.length; i++) {
            slowwriters[i] = new SlowWriter(fs, new Path(dir, "file" + i));
        }
        try {
            for (int i = 0; i < slowwriters.length; i++) {
                slowwriters[i].start();
            }
            // let writers get started
            Thread.sleep(1000);
            // stop a datanode; this should trigger lease recovery.
            cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
            // let the slow writers write for a few more seconds
            System.out.println("Wait a few seconds");
            Thread.sleep(5000);
        } finally {
            for (int i = 0; i < slowwriters.length; i++) {
                if (slowwriters[i] != null) {
                    slowwriters[i].running = false;
                    slowwriters[i].interrupt();
                }
            }
            for (int i = 0; i < slowwriters.length; i++) {
                if (slowwriters[i] != null) {
                    slowwriters[i].join();
                }
            }
        }
        // Verify the file
        System.out.println("Verify the file");
        for (int i = 0; i < slowwriters.length; i++) {
            System.out.println(slowwriters[i].filepath + ": length=" + fs.getFileStatus(slowwriters[i].filepath).getLen());
            FSDataInputStream in = null;
            try {
                in = fs.open(slowwriters[i].filepath);
                for (int j = 0, x; (x = in.read()) != -1; j++) {
                    assertEquals(j, x);
                }
            } finally {
                IOUtils.closeStream(in);
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

18 Source : TestFileAppendRestart.java
with Apache License 2.0
from NJUJYB

private void writeAndAppend(FileSystem fs, Path p, int lengthForCreate, int lengthForAppend) throws IOException {
    // Creating a file with 4096 blockSize to write multiple blocks
    FSDataOutputStream stream = fs.create(p, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
    try {
        AppendTestUtil.write(stream, 0, lengthForCreate);
        stream.close();
        stream = fs.append(p);
        AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
        stream.close();
    } finally {
        IOUtils.closeStream(stream);
    }
    int totalLength = lengthForCreate + lengthForAppend;
    assertEquals(totalLength, fs.getFileStatus(p).getLen());
}

18 Source : TestJournal.java
with Apache License 2.0
from NJUJYB

@After
public void cleanup() {
    IOUtils.closeStream(journal);
}

18 Source : MD5FileUtils.java
with Apache License 2.0
from NJUJYB

/**
 * Read dataFile and compute its MD5 checksum.
 */
public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
    InputStream in = new FileInputStream(dataFile);
    try {
        MessageDigest digester = MD5Hash.getDigester();
        DigestInputStream dis = new DigestInputStream(in, digester);
        IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128 * 1024);
        return new MD5Hash(digester.digest());
    } finally {
        IOUtils.closeStream(in);
    }
}

18 Source : BestEffortLongFile.java
with Apache License 2.0
from NJUJYB

private void lazyOpen() throws IOException {
    if (ch != null) {
        return;
    }
    // Load current value.
    byte[] data = null;
    try {
        data = Files.toByteArray(file);
    } catch (FileNotFoundException fnfe) {
    // Expected - this will use default value.
    }
    if (data != null && data.length != 0) {
        if (data.length != Longs.BYTES) {
            throw new IOException("File " + file + " had invalid length: " + data.length);
        }
        value = Longs.fromByteArray(data);
    } else {
        value = defaultVal;
    }
    // Now open file for future writes.
    RandomAccessFile raf = new RandomAccessFile(file, "rw");
    try {
        ch = raf.getChannel();
    } finally {
        if (ch == null) {
            IOUtils.closeStream(raf);
        }
    }
}

18 Source : OfflineEditsVisitorFactory.java
with Apache License 2.0
from NJUJYB

/**
 * Factory function that creates an EditsVisitor object
 *
 * @param filename              output filename
 * @param processor             type of visitor to create
 * @param printToScreen         parameter passed to visitor constructor
 *
 * @return EditsVisitor for appropriate output format (binary, xml, etc.)
 */
static public OfflineEditsVisitor getEditsVisitor(String filename, String processor, boolean printToScreen) throws IOException {
    if (processor.toLowerCase().equals("binary")) {
        return new BinaryEditsVisitor(filename);
    }
    OfflineEditsVisitor vis;
    OutputStream fout = new FileOutputStream(filename);
    OutputStream out = null;
    try {
        if (!printToScreen) {
            out = fout;
        } else {
            OutputStream[] outs = new OutputStream[2];
            outs[0] = fout;
            outs[1] = System.out;
            out = new TeeOutputStream(outs);
        }
        if (processor.toLowerCase().equals("xml")) {
            vis = new XmlEditsVisitor(out);
        } else if (processor.toLowerCase().equals("stats")) {
            vis = new StatisticsEditsVisitor(out);
        } else {
            throw new IOException("Unknown proccesor " + processor + " (valid processors: xml, binary, stats)");
        }
        out = fout = null;
        return vis;
    } finally {
        IOUtils.closeStream(fout);
        IOUtils.closeStream(out);
    }
}

18 Source : EditLogFileInputStream.java
with Apache License 2.0
from NJUJYB

static FSEditLogLoader.EditLogValidation scanEditLog(File file) throws IOException {
    EditLogFileInputStream in;
    try {
        in = new EditLogFileInputStream(file);
        // read the header, initialize the inputstream, but do not check the
        // layoutversion
        in.getVersion(false);
    } catch (LogHeaderCorruptException e) {
        LOG.warn("Log file " + file + " has no valid header", e);
        return new FSEditLogLoader.EditLogValidation(0, HdfsConstants.INVALID_TXID, true);
    }
    long lastPos = 0;
    long lastTxId = HdfsConstants.INVALID_TXID;
    long numValid = 0;
    try {
        while (true) {
            long txid = HdfsConstants.INVALID_TXID;
            lastPos = in.getPosition();
            try {
                if ((txid = in.scanNextOp()) == HdfsConstants.INVALID_TXID) {
                    break;
                }
            } catch (Throwable t) {
                FSImage.LOG.warn("Caught exception after scanning through " + numValid + " ops from " + in + " while determining its valid length. Position was " + lastPos, t);
                in.resync();
                FSImage.LOG.warn("After resync, position is " + in.getPosition());
                continue;
            }
            if (lastTxId == HdfsConstants.INVALID_TXID || txid > lastTxId) {
                lastTxId = txid;
            }
            numValid++;
        }
        return new EditLogValidation(lastPos, lastTxId, false);
    } finally {
        IOUtils.closeStream(in);
    }
}

18 Source : BlockMetadataHeader.java
with Apache License 2.0
from NJUJYB

/**
 * Read the checksum header from the meta file.
 * @return the data checksum obtained from the header.
 */
public static DataChecksum readDataChecksum(File metaFile) throws IOException {
    DataInputStream in = null;
    try {
        in = new DataInputStream(new BufferedInputStream(new FileInputStream(metaFile), HdfsConstants.IO_FILE_BUFFER_SIZE));
        return readDataChecksum(in, metaFile);
    } finally {
        IOUtils.closeStream(in);
    }
}

18 Source : BlockMetadataHeader.java
with Apache License 2.0
from NJUJYB

/**
 * Reads header at the top of metadata file and returns the header.
 *
 * @return metadata header for the block
 * @throws IOException
 */
public static BlockMetadataHeader readHeader(File file) throws IOException {
    DataInputStream in = null;
    try {
        in = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
        return readHeader(in);
    } finally {
        IOUtils.closeStream(in);
    }
}

18 Source : AbstractContractOpenTest.java
with Apache License 2.0
from NJUJYB

@Test
public void testOpenFileTwice() throws Throwable {
    describe("verify that two opened file streams are independent");
    Path path = path("testopenfiletwice.txt");
    byte[] block = dataset(TEST_FILE_LEN, 0, 255);
    // this file now has a simple rule: offset => value
    createFile(getFileSystem(), path, false, block);
    // open first
    FSDataInputStream instream1 = getFileSystem().open(path);
    int c = instream1.read();
    assertEquals(0, c);
    FSDataInputStream instream2 = null;
    try {
        instream2 = getFileSystem().open(path);
        replacedertEquals("first read of instream 2", 0, instream2.read());
        replacedertEquals("second read of instream 1", 1, instream1.read());
        instream1.close();
        replacedertEquals("second read of instream 2", 1, instream2.read());
        // close instream1 again
        instream1.close();
    } finally {
        IOUtils.closeStream(instream1);
        IOUtils.closeStream(instream2);
    }
}

18 Source : AbstractContractCreateTest.java
with Apache License 2.0
from NJUJYB

@Test
public void testCreatedFileIsImmediatelyVisible() throws Throwable {
    describe("verify that a newly created file exists as soon as open returns");
    Path path = path("testCreatedFileIsImmediatelyVisible");
    FSDataOutputStream out = null;
    try {
        out = getFileSystem().create(path, false, 4096, (short) 1, 1024);
        if (!getFileSystem().exists(path)) {
            if (isSupported(IS_BLOBSTORE)) {
                // object store: downgrade to a skip so that the failure is visible
                // in test results
                skip("Filesystem is an object store and newly created files are not immediately visible");
            }
            assertPathExists("expected path to be visible before anything written", path);
        }
    } finally {
        IOUtils.closeStream(out);
    }
}

18 Source : GraphiteSink.java
with Apache License 2.0
from NJUJYB

@Override
public void close() throws IOException {
    try {
        IOUtils.closeStream(writer);
        writer = null;
        LOG.info("writer in GraphiteSink is closed!");
    } catch (Throwable e) {
        throw new MetricsException("Error closing writer", e);
    } finally {
        if (socket != null && !socket.isClosed()) {
            socket.close();
            socket = null;
            LOG.info("socket in GraphiteSink is closed!");
        }
    }
}

18 Source : FileBasedCopyListing.java
with Apache License 2.0
from naver

private List<Path> fetchFileList(Path sourceListing) throws IOException {
    List<Path> result = new ArrayList<Path>();
    FileSystem fs = sourceListing.getFileSystem(getConf());
    BufferedReader input = null;
    try {
        input = new BufferedReader(new InputStreamReader(fs.open(sourceListing), Charset.forName("UTF-8")));
        String line = input.readLine();
        while (line != null) {
            result.add(new Path(line));
            line = input.readLine();
        }
    } finally {
        IOUtils.closeStream(input);
    }
    return result;
}

18 Source : OfflineEditsVisitorFactory.java
with Apache License 2.0
from naver

/**
 * Factory function that creates an EditsVisitor object
 *
 * @param filename              output filename
 * @param processor             type of visitor to create
 * @param printToScreen         parameter passed to visitor constructor
 *
 * @return EditsVisitor for appropriate output format (binary, xml, etc.)
 */
static public OfflineEditsVisitor getEditsVisitor(String filename, String processor, boolean printToScreen) throws IOException {
    if (StringUtils.equalsIgnoreCase("binary", processor)) {
        return new BinaryEditsVisitor(filename);
    }
    OfflineEditsVisitor vis;
    OutputStream fout = new FileOutputStream(filename);
    OutputStream out = null;
    try {
        if (!printToScreen) {
            out = fout;
        } else {
            OutputStream[] outs = new OutputStream[2];
            outs[0] = fout;
            outs[1] = System.out;
            out = new TeeOutputStream(outs);
        }
        if (StringUtils.equalsIgnoreCase("xml", processor)) {
            vis = new XmlEditsVisitor(out);
        } else if (StringUtils.equalsIgnoreCase("stats", processor)) {
            vis = new StatisticsEditsVisitor(out);
        } else {
            throw new IOException("Unknown proccesor " + processor + " (valid processors: xml, binary, stats)");
        }
        out = fout = null;
        return vis;
    } finally {
        IOUtils.closeStream(fout);
        IOUtils.closeStream(out);
    }
}

18 Source : ReplicaInputStreams.java
with Apache License 2.0
from naver

@Override
public void close() {
    IOUtils.closeStream(dataIn);
    IOUtils.closeStream(checksumIn);
    IOUtils.cleanup(null, volumeRef);
}

18 Source : HadoopIOFormatsITCase.java
with Apache License 2.0
from ljygz

@Override
protected void preSubmit() throws Exception {
    resultPath = new String[] { getTempDirPath("result0"), getTempDirPath("result1") };
    File sequenceFile = createAndRegisterTempFile("seqFile");
    sequenceFileInPath = sequenceFile.toURI().toString();
    // Create a sequence file
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    FileSystem fs = FileSystem.get(URI.create(sequenceFile.getAbsolutePath()), conf);
    Path path = new Path(sequenceFile.getAbsolutePath());
    // ------------------ Long / Text Key Value pair: ------------
    int kvCount = 4;
    LongWritable key = new LongWritable();
    Text value = new Text();
    SequenceFile.Writer writer = null;
    try {
        writer = SequenceFile.createWriter(fs, conf, path, key.getClass(), value.getClass());
        for (int i = 0; i < kvCount; i++) {
            if (i == 1) {
                // write key = 0 a bit more often.
                for (int a = 0; a < 15; a++) {
                    key.set(i);
                    value.set(i + " - somestring");
                    writer.append(key, value);
                }
            }
            key.set(i);
            value.set(i + " - somestring");
            writer.append(key, value);
        }
    } finally {
        IOUtils.closeStream(writer);
    }
    // ------------------ Long / Text Key Value pair: ------------
    File sequenceFileNull = createAndRegisterTempFile("seqFileNullKey");
    sequenceFileInPathNull = sequenceFileNull.toURI().toString();
    path = new Path(sequenceFileInPathNull);
    LongWritable value1 = new LongWritable();
    SequenceFile.Writer writer1 = null;
    try {
        writer1 = SequenceFile.createWriter(fs, conf, path, NullWritable.class, value1.getClass());
        for (int i = 0; i < kvCount; i++) {
            value1.set(i);
            writer1.append(NullWritable.get(), value1);
        }
    } finally {
        IOUtils.closeStream(writer1);
    }
}

18 Source : CubeStatsReader.java
with Apache License 2.0
from Kyligence

private File writeTmpSeqFile(InputStream inputStream) throws IOException {
    File tempFile = File.createTempFile("kylin_stats_tmp", ".seq");
    FileOutputStream out = null;
    try {
        out = new FileOutputStream(tempFile);
        org.apache.commons.io.IOUtils.copy(inputStream, out);
    } finally {
        IOUtils.closeStream(inputStream);
        IOUtils.closeStream(out);
    }
    return tempFile;
}

18 Source : MyRecordReader.java
with MIT License
from josonle

@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
    if (!processed) {
        // Allocate a byte array sized to the length of the split
        byte[] contents = new byte[(int) fileSplit.getLength()];
        // Get the path where the split is located
        Path file = fileSplit.getPath();
        FileSystem fSystem = file.getFileSystem(conf);
        FSDataInputStream in = null;
        try {
            // Open the file
            in = fSystem.open(file);
            // Read the entire file's bytes into contents
            IOUtils.readFully(in, contents, 0, contents.length);
            // Assign the whole file's data to value
            value.set(contents, 0, contents.length);
        } finally {
            IOUtils.closeStream(in);
        }
        processed = true;
        return true;
    }
    return false;
}

18 Source : CopyListing.java
with Apache License 2.0
from HotelsDotCom

/**
 * Validate the final resulting path listing. Checks if there are duplicate entries. If preserving ACLs, checks that
 * file system can support ACLs. If preserving XAttrs, checks that file system can support XAttrs.
 *
 * @param pathToListFile path listing build by doBuildListing
 * @param options Input options to S3MapReduceCp
 * @throws IOException if any issues arise while checking for duplicates
 * @throws DuplicateFileException if there are duplicates
 */
private void validateFinalListing(Path pathToListFile, S3MapReduceCpOptions options) throws DuplicateFileException, IOException {
    Configuration config = getConf();
    FileSystem fs = pathToListFile.getFileSystem(config);
    Path sortedList = sortListing(fs, config, pathToListFile);
    SequenceFile.Reader reader = new SequenceFile.Reader(config, SequenceFile.Reader.file(sortedList));
    try {
        // source relative path can never hold *
        Text lastKey = new Text("*");
        CopyListingFileStatus lastFileStatus = new CopyListingFileStatus();
        Text currentKey = new Text();
        while (reader.next(currentKey)) {
            if (currentKey.equals(lastKey)) {
                CopyListingFileStatus currentFileStatus = new CopyListingFileStatus();
                reader.getCurrentValue(currentFileStatus);
                throw new DuplicateFileException("File " + lastFileStatus.getPath() + " and " + currentFileStatus.getPath() + " would cause duplicates. Aborting");
            }
            reader.getCurrentValue(lastFileStatus);
            lastKey.set(currentKey);
        }
    } finally {
        IOUtils.closeStream(reader);
    }
}
