org.apache.hadoop.fs.BlockLocation

The following are usage examples of the Java API class org.apache.hadoop.fs.BlockLocation, taken from open source projects.
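
Before the project-specific examples, here is a minimal sketch of the typical pattern: ask a FileSystem for the block locations of a file (or of a byte range within it) and read the hosts, offset, and length off each BlockLocation. The path below is only a placeholder.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationExample {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Placeholder path; replace with a file that exists on the target file system.
        FileStatus status = fs.getFileStatus(new Path("/tmp/some-file"));
        // Locations covering the whole file.
        BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
        for (BlockLocation loc : locations) {
            System.out.println("offset=" + loc.getOffset() + " length=" + loc.getLength() + " hosts=" + Arrays.toString(loc.getHosts()) + " corrupt=" + loc.isCorrupt());
        }
    }
}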

1. FSUtils#computeHDFSBlocksDistribution()

Project: hindex
Source File: FSUtils.java
View license
/**
   * Compute HDFS blocks distribution of a given file, or a portion of the file
   * @param fs file system
   * @param status file status of the file
   * @param start start position of the portion
   * @param length length of the portion
   * @return The HDFS blocks distribution
   */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs, FileStatus status, long start, long length) throws IOException {
    HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
    BlockLocation[] blockLocations = fs.getFileBlockLocations(status, start, length);
    for (BlockLocation bl : blockLocations) {
        String[] hosts = bl.getHosts();
        long len = bl.getLength();
        blocksDistribution.addHostsAndBlockWeight(hosts, len);
    }
    return blocksDistribution;
}

2. Metadata#getHostAffinity()

Project: drill
Source File: Metadata.java
View license
/**
   * Get the host affinity for a row group
   *
   * @param fileStatus the parquet file
   * @param start      the start of the row group
   * @param length     the length of the row group
   * @return a map from host name to the fraction of the row group stored on that host
   * @throws IOException if the block locations cannot be retrieved
   */
private Map<String, Float> getHostAffinity(FileStatus fileStatus, long start, long length) throws IOException {
    BlockLocation[] blockLocations = fs.getFileBlockLocations(fileStatus, start, length);
    Map<String, Float> hostAffinityMap = Maps.newHashMap();
    for (BlockLocation blockLocation : blockLocations) {
        for (String host : blockLocation.getHosts()) {
            Float currentAffinity = hostAffinityMap.get(host);
            float blockStart = blockLocation.getOffset();
            float blockEnd = blockStart + blockLocation.getLength();
            float rowGroupEnd = start + length;
            Float newAffinity = (blockLocation.getLength() - (blockStart < start ? start - blockStart : 0) - (blockEnd > rowGroupEnd ? blockEnd - rowGroupEnd : 0)) / length;
            if (currentAffinity != null) {
                hostAffinityMap.put(host, currentAffinity + newAffinity);
            } else {
                hostAffinityMap.put(host, newAffinity);
            }
        }
    }
    return hostAffinityMap;
}
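
The affinity value above is simply the fraction of the row group's bytes that fall inside each block, clipped at both ends. A standalone sketch of that overlap computation (a hypothetical helper, not part of Drill's Metadata class):

/**
 * Fraction of the byte range [start, start + length) that overlaps the given block.
 * Returns 0 when the block and the range do not intersect.
 */
static double overlapFraction(BlockLocation block, long start, long length) {
    long rangeEnd = start + length;
    long blockStart = block.getOffset();
    long blockEnd = blockStart + block.getLength();
    long overlap = Math.min(blockEnd, rangeEnd) - Math.max(blockStart, start);
    return overlap > 0 ? (double) overlap / length : 0.0;
}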

3. BlockMapBuilder#buildBlockMap()

Project: drill
Source File: BlockMapBuilder.java
View license
/**
   * Builds a mapping of block locations to file byte range
   */
private ImmutableRangeMap<Long, BlockLocation> buildBlockMap(FileStatus status) throws IOException {
    final Timer.Context context = metrics.timer(BLOCK_MAP_BUILDER_TIMER).time();
    BlockLocation[] blocks;
    ImmutableRangeMap<Long, BlockLocation> blockMap;
    blocks = fs.getFileBlockLocations(status, 0, status.getLen());
    ImmutableRangeMap.Builder<Long, BlockLocation> blockMapBuilder = new ImmutableRangeMap.Builder<Long, BlockLocation>();
    for (BlockLocation block : blocks) {
        long start = block.getOffset();
        long end = start + block.getLength();
        Range<Long> range = Range.closedOpen(start, end);
        blockMapBuilder = blockMapBuilder.put(range, block);
    }
    blockMap = blockMapBuilder.build();
    blockMapMap.put(status.getPath(), blockMap);
    context.stop();
    return blockMap;
}
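
Once the range map is built, finding the block that contains a given file offset is a single lookup. A minimal sketch against the map returned by buildBlockMap above (assumes com.google.common.collect.ImmutableRangeMap and org.apache.hadoop.fs.BlockLocation are imported):

// Hosts holding the block that covers the given file offset, or an empty array
// if the offset falls past the last block.
static String[] hostsForOffset(ImmutableRangeMap<Long, BlockLocation> blockMap, long offset) throws IOException {
    BlockLocation block = blockMap.get(offset);
    return block == null ? new String[0] : block.getHosts();
}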

4. TestAffinityCalculator#testBuildRangeMap()

Project: drill
Source File: TestAffinityCalculator.java
View license
//  @Test
//  public void testSetEndpointBytes(@Injectable final FileSystem fs, @Injectable final FileStatus file) throws Throwable{
//    final long blockSize = 256*1024*1024;
//    LinkedList<ParquetGroupScan.RowGroupInfo> rowGroups = new LinkedList<>();
//    int numberOfHosts = 4;
//    int numberOfBlocks = 3;
//    String port = "1234";
//    String[] hosts = new String[numberOfHosts];
//
//    final BlockLocation[] blockLocations = buildBlockLocations(hosts, blockSize);
//    final LinkedList<CoordinationProtos.DrillbitEndpoint> endPoints = buildEndpoints(numberOfHosts);
//    buildRowGroups(rowGroups, numberOfBlocks, blockSize, 3);
//
//    new NonStrictExpectations() {{
//      fs.getFileBlockLocations(file, 0, 3*blockSize); result = blockLocations;
//      fs.getFileStatus(new Path(path)); result = file;
//      file.getLen(); result = 3*blockSize;
//    }};
//
//
//    BlockMapBuilder ac = new BlockMapBuilder(fs, endPoints);
//    for (ParquetGroupScan.RowGroupInfo rowGroup : rowGroups) {
//      ac.setEndpointBytes(rowGroup);
//    }
//    ParquetGroupScan.RowGroupInfo rg = rowGroups.get(0);
//    Long b = rg.getEndpointBytes().get(endPoints.get(0));
//    assertEquals(blockSize,b.longValue());
//    b = rg.getEndpointBytes().get(endPoints.get(3));
//    assertNull(b);
//
//    buildRowGroups(rowGroups, numberOfBlocks, blockSize, 2);
//
//    ac = new BlockMapBuilder(fs, endPoints);
//    for (ParquetGroupScan.RowGroupInfo rowGroup : rowGroups) {
//      ac.setEndpointBytes(rowGroup);
//    }
//    rg = rowGroups.get(0);
//    b = rg.getEndpointBytes().get(endPoints.get(0));
//    assertEquals(blockSize*3/2,b.longValue());
//    b = rg.getEndpointBytes().get(endPoints.get(3));
//    assertEquals(blockSize / 2, b.longValue());
//
//    buildRowGroups(rowGroups, numberOfBlocks, blockSize, 6);
//
//    ac = new BlockMapBuilder(fs, endPoints);
//    for (ParquetGroupScan.RowGroupInfo rowGroup : rowGroups) {
//      ac.setEndpointBytes(rowGroup);
//    }
//    rg = rowGroups.get(0);
//    b = rg.getEndpointBytes().get(endPoints.get(0));
//    assertEquals(blockSize/2,b.longValue());
//    b = rg.getEndpointBytes().get(endPoints.get(3));
//    assertNull(b);
//  }
@Test
public void testBuildRangeMap() {
    BlockLocation[] blocks = buildBlockLocations(new String[4], 256 * 1024 * 1024);
    long tA = System.nanoTime();
    ImmutableRangeMap.Builder<Long, BlockLocation> blockMapBuilder = new ImmutableRangeMap.Builder<Long, BlockLocation>();
    for (BlockLocation block : blocks) {
        long start = block.getOffset();
        long end = start + block.getLength();
        Range<Long> range = Range.closedOpen(start, end);
        blockMapBuilder = blockMapBuilder.put(range, block);
    }
    ImmutableRangeMap<Long, BlockLocation> map = blockMapBuilder.build();
    long tB = System.nanoTime();
    System.out.println(String.format("Took %f ms to build range map", (tB - tA) / 1e6));
}

5. DirectoryStripeReader#getNextStripeBlockLocations()

View license
public BlockLocation[] getNextStripeBlockLocations() throws IOException {
    BlockLocation[] blocks = new BlockLocation[codec.stripeLength];
    int startOffset = (int) currentStripeIdx * codec.stripeLength;
    int curFileIdx = this.stripeBlocks.get(startOffset).fileIdx;
    FileStatus curFile = lfs.get(curFileIdx);
    BlockLocation[] curBlocks = fs.getFileBlockLocations(curFile, 0, curFile.getLen());
    for (int i = 0; i < codec.stripeLength; i++) {
        if (startOffset + i < this.stripeBlocks.size()) {
            BlockInfo bi = this.stripeBlocks.get(startOffset + i);
            if (bi.fileIdx != curFileIdx) {
                curFileIdx = bi.fileIdx;
                curFile = lfs.get(curFileIdx);
                curBlocks = fs.getFileBlockLocations(curFile, 0, curFile.getLen());
            }
            blocks[i] = curBlocks[bi.blockId];
        } else {
            // We have no src data at this offset.
            blocks[i] = null;
        }
    }
    currentStripeIdx++;
    return blocks;
}

6. RaidShell#collectNumCorruptBlocksInFile()

Project: hadoop-20
Source File: RaidShell.java
View license
public static int collectNumCorruptBlocksInFile(final DistributedFileSystem dfs, final Path filePath) throws IOException {
    FileStatus stat = dfs.getFileStatus(filePath);
    BlockLocation[] blocks = dfs.getFileBlockLocations(stat, 0, stat.getLen());
    int count = 0;
    for (BlockLocation block : blocks) {
        if (RaidShell.isBlockCorrupt(block)) {
            count++;
            if (LOG.isDebugEnabled()) {
                LOG.debug("file " + filePath.toString() + " corrupt in block " + block);
            }
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("file " + filePath.toString() + " OK in block " + block);
            }
        }
    }
    return count;
}
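
RaidShell.isBlockCorrupt is RAID-specific, but BlockLocation itself carries an isCorrupt() flag that supports the same kind of count against a plain file system. A minimal sketch, assuming the file system and path are supplied by the caller:

static int countCorruptBlocks(FileSystem fs, Path path) throws IOException {
    FileStatus stat = fs.getFileStatus(path);
    int corrupt = 0;
    for (BlockLocation block : fs.getFileBlockLocations(stat, 0, stat.getLen())) {
        // isCorrupt() is set when HDFS reports the block as corrupt.
        if (block.isCorrupt()) {
            corrupt++;
        }
    }
    return corrupt;
}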

7. TestFileLocalRead#checkFullFile()

Project: hadoop-20
Source File: TestFileLocalRead.java
View license
static void checkFullFile(FileSystem fs, Path name) throws IOException {
    FileStatus stat = fs.getFileStatus(name);
    BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, fileSize);
    for (int idx = 0; idx < locations.length; idx++) {
        String[] hosts = locations[idx].getNames();
        for (int i = 0; i < hosts.length; i++) {
            System.out.print(hosts[i] + " ");
        }
        System.out.println(" off " + locations[idx].getOffset() + " len " + locations[idx].getLength());
    }
    byte[] expected = AppendTestUtil.randomBytes(seed, fileSize);
    FSDataInputStream stm = fs.open(name);
    byte[] actual = new byte[fileSize];
    stm.readFully(0, actual);
    checkData(actual, 0, expected, "Read 2");
    stm.close();
}

8. DFSClient#getHints()

Project: hadoop-20
Source File: DFSClient.java
View license
/**
   *  @deprecated Use getBlockLocations instead
   *
   * Get hints about the location of the indicated block(s).
   *
   * getHints() returns a list of hostnames that store data for
   * a specific file region.  It returns a set of hostnames for
   * every block within the indicated region.
   *
   * This function is very useful when writing code that considers
   * data-placement when performing operations.  For example, the
   * MapReduce system tries to schedule tasks on the same machines
   * as the data-block the task processes.
   */
@Deprecated
public String[][] getHints(String src, long start, long length) throws IOException {
    BlockLocation[] blkLocations = getBlockLocations(src, start, length);
    if ((blkLocations == null) || (blkLocations.length == 0)) {
        return new String[0][];
    }
    int blkCount = blkLocations.length;
    String[][] hints = new String[blkCount][];
    for (int i = 0; i < blkCount; i++) {
        String[] hosts = blkLocations[i].getHosts();
        hints[i] = new String[hosts.length];
        hints[i] = hosts;
    }
    return hints;
}
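
As the deprecation note says, the same host lists are available through the public FileSystem API without going through DFSClient. A minimal sketch of the equivalent lookup (path and byte range come from the caller):

static String[][] hostsPerBlock(FileSystem fs, Path path, long start, long length) throws IOException {
    FileStatus stat = fs.getFileStatus(path);
    BlockLocation[] blocks = fs.getFileBlockLocations(stat, start, length);
    if (blocks == null || blocks.length == 0) {
        return new String[0][];
    }
    String[][] hosts = new String[blocks.length][];
    for (int i = 0; i < blocks.length; i++) {
        hosts[i] = blocks[i].getHosts();
    }
    return hosts;
}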

9. TestFileCreation#checkFullFile()

Project: hadoop-common
Source File: TestFileCreation.java
View license
static void checkFullFile(FileSystem fs, Path name) throws IOException {
    FileStatus stat = fs.getFileStatus(name);
    BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, fileSize);
    for (int idx = 0; idx < locations.length; idx++) {
        String[] hosts = locations[idx].getNames();
        for (int i = 0; i < hosts.length; i++) {
            System.out.print(hosts[i] + " ");
        }
        System.out.println(" off " + locations[idx].getOffset() + " len " + locations[idx].getLength());
    }
    byte[] expected = AppendTestUtil.randomBytes(seed, fileSize);
    FSDataInputStream stm = fs.open(name);
    byte[] actual = new byte[fileSize];
    stm.readFully(0, actual);
    checkData(actual, 0, expected, "Read 2");
    stm.close();
}

10. FileInputFormat#getBlockIndex()

Project: hadoop-20
Source File: FileInputFormat.java
View license
protected int getBlockIndex(BlockLocation[] blkLocations, long offset) {
    for (int i = 0; i < blkLocations.length; i++) {
        // is the offset inside this block?
        if ((blkLocations[i].getOffset() <= offset) && (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())) {
            return i;
        }
    }
    BlockLocation last = blkLocations[blkLocations.length - 1];
    long fileLength = last.getOffset() + last.getLength() - 1;
    throw new IllegalArgumentException("Offset " + offset + " is outside of file (0.." + fileLength + ")");
}
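
A quick sanity check of the lookup, written as a fragment that would live inside a FileInputFormat subclass (so the protected getBlockIndex is visible); the host names and ports are placeholders:

void checkBlockIndex() {
    long blockSize = 128L * 1024 * 1024;
    // Two consecutive blocks: [0, blockSize) and [blockSize, 2 * blockSize).
    BlockLocation[] blocks = {
        new BlockLocation(new String[] { "host1:50010" }, new String[] { "host1" }, 0, blockSize),
        new BlockLocation(new String[] { "host2:50010" }, new String[] { "host2" }, blockSize, blockSize)
    };
    // An offset exactly at the block boundary resolves to the second block.
    int idx = getBlockIndex(blocks, blockSize);
    assert idx == 1;
}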

11. FileInputFormat#getBlockIndex()

Project: hadoop-20
Source File: FileInputFormat.java
View license
protected int getBlockIndex(BlockLocation[] blkLocations, long offset) {
    for (int i = 0; i < blkLocations.length; i++) {
        // is the offset inside this block?
        if ((blkLocations[i].getOffset() <= offset) && (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())) {
            return i;
        }
    }
    BlockLocation last = blkLocations[blkLocations.length - 1];
    long fileLength = last.getOffset() + last.getLength() - 1;
    throw new IllegalArgumentException("Offset " + offset + " is outside of file (0.." + fileLength + ")");
}

12. KFSEmulationImpl#getDataLocation()

Project: hadoop-20
Source File: KFSEmulationImpl.java
View license
public String[][] getDataLocation(String path, long start, long len) throws IOException {
    BlockLocation[] blkLocations = localFS.getFileBlockLocations(localFS.getFileStatus(new Path(path)), start, len);
    if ((blkLocations == null) || (blkLocations.length == 0)) {
        return new String[0][];
    }
    int blkCount = blkLocations.length;
    String[][] hints = new String[blkCount][];
    for (int i = 0; i < blkCount; i++) {
        String[] hosts = blkLocations[i].getHosts();
        hints[i] = new String[hosts.length];
        hints[i] = hosts;
    }
    return hints;
}

13. TestDecommission#printFileLocations()

Project: hadoop-common
Source File: TestDecommission.java
View license
private void printFileLocations(FileSystem fileSys, Path name) throws IOException {
    BlockLocation[] locations = fileSys.getFileBlockLocations(fileSys.getFileStatus(name), 0, fileSize);
    for (int idx = 0; idx < locations.length; idx++) {
        String[] loc = locations[idx].getHosts();
        System.out.print("Block[" + idx + "] : ");
        for (int j = 0; j < loc.length; j++) {
            System.out.print(loc[j] + " ");
        }
        System.out.println("");
    }
}

14. TestFileCreation#checkFullFile()

Project: hadoop-20
Source File: TestFileCreation.java
View license
static void checkFullFile(FileSystem fs, Path name) throws IOException {
    FileStatus stat = fs.getFileStatus(name);
    BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, fileSize);
    for (int idx = 0; idx < locations.length; idx++) {
        String[] hosts = locations[idx].getNames();
        for (int i = 0; i < hosts.length; i++) {
            System.out.print(hosts[i] + " ");
        }
        System.out.println(" off " + locations[idx].getOffset() + " len " + locations[idx].getLength());
    }
    byte[] expected = AppendTestUtil.randomBytes(seed, fileSize);
    FSDataInputStream stm = fs.open(name);
    byte[] actual = new byte[fileSize];
    stm.readFully(0, actual);
    checkData(actual, 0, expected, "Read 2");
    stm.close();
}

15. TestSmallBlock#checkFile()

Project: hadoop-common
Source File: TestSmallBlock.java
View license
private void checkFile(FileSystem fileSys, Path name) throws IOException {
    BlockLocation[] locations = fileSys.getFileBlockLocations(fileSys.getFileStatus(name), 0, fileSize);
    assertEquals("Number of blocks", fileSize, locations.length);
    FSDataInputStream stm = fileSys.open(name);
    byte[] expected = new byte[fileSize];
    if (simulatedStorage) {
        for (int i = 0; i < expected.length; ++i) {
            expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
        }
    } else {
        Random rand = new Random(seed);
        rand.nextBytes(expected);
    }
    // do a sanity check. Read the file
    byte[] actual = new byte[fileSize];
    stm.readFully(0, actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
    stm.close();
}

16. KFSEmulationImpl#getDataLocation()

Project: hadoop-common
Source File: KFSEmulationImpl.java
View license
public String[][] getDataLocation(String path, long start, long len) throws IOException {
    BlockLocation[] blkLocations = localFS.getFileBlockLocations(localFS.getFileStatus(new Path(path)), start, len);
    if ((blkLocations == null) || (blkLocations.length == 0)) {
        return new String[0][];
    }
    int blkCount = blkLocations.length;
    String[][] hints = new String[blkCount][];
    for (int i = 0; i < blkCount; i++) {
        String[] hosts = blkLocations[i].getHosts();
        hints[i] = new String[hosts.length];
        hints[i] = hosts;
    }
    return hints;
}

17. TestSmallBlock#checkFile()

Project: hadoop-20
Source File: TestSmallBlock.java
View license
private void checkFile(FileSystem fileSys, Path name) throws IOException {
    BlockLocation[] locations = fileSys.getFileBlockLocations(fileSys.getFileStatus(name), 0, fileSize);
    assertEquals("Number of blocks", fileSize, locations.length);
    FSDataInputStream stm = fileSys.open(name);
    byte[] expected = new byte[fileSize];
    if (simulatedStorage) {
        for (int i = 0; i < expected.length; ++i) {
            expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
        }
    } else {
        Random rand = new Random(seed);
        rand.nextBytes(expected);
    }
    // do a sanity check. Read the file
    byte[] actual = new byte[fileSize];
    stm.readFully(0, actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
    stm.close();
}

18. FileInputFormat#getBlockIndex()

Project: hadoop-common
Source File: FileInputFormat.java
View license
protected int getBlockIndex(BlockLocation[] blkLocations, long offset) {
    for (int i = 0; i < blkLocations.length; i++) {
        // is the offset inside this block?
        if ((blkLocations[i].getOffset() <= offset) && (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())) {
            return i;
        }
    }
    BlockLocation last = blkLocations[blkLocations.length - 1];
    long fileLength = last.getOffset() + last.getLength() - 1;
    throw new IllegalArgumentException("Offset " + offset + " is outside of file (0.." + fileLength + ")");
}

19. KosmosFileSystem#getFileBlockLocations()

Project: hadoop-common
Source File: KosmosFileSystem.java
View license
/**
     * Return null if the file doesn't exist; otherwise, get the
     * locations of the various chunks of the file from KFS.
     */
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException {
    if (file == null) {
        return null;
    }
    String srep = makeAbsolute(file.getPath()).toUri().getPath();
    String[][] hints = kfsImpl.getDataLocation(srep, start, len);
    if (hints == null) {
        return null;
    }
    BlockLocation[] result = new BlockLocation[hints.length];
    long blockSize = getDefaultBlockSize();
    long length = len;
    long blockStart = start;
    for (int i = 0; i < result.length; ++i) {
        result[i] = new BlockLocation(null, hints[i], blockStart, length < blockSize ? length : blockSize);
        blockStart += blockSize;
        length -= blockSize;
    }
    return result;
}

20. FileInputFormat#getBlockIndex()

Project: hadoop-common
Source File: FileInputFormat.java
View license
protected int getBlockIndex(BlockLocation[] blkLocations, long offset) {
    for (int i = 0; i < blkLocations.length; i++) {
        // is the offset inside this block?
        if ((blkLocations[i].getOffset() <= offset) && (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())) {
            return i;
        }
    }
    BlockLocation last = blkLocations[blkLocations.length - 1];
    long fileLength = last.getOffset() + last.getLength() - 1;
    throw new IllegalArgumentException("Offset " + offset + " is outside of file (0.." + fileLength + ")");
}

21. KosmosFileSystem#getFileBlockLocations()

Project: hadoop-20
Source File: KosmosFileSystem.java
View license
/**
     * Return null if the file doesn't exist; otherwise, get the
     * locations of the various chunks of the file from KFS.
     */
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException {
    if (file == null) {
        return null;
    }
    String srep = makeAbsolute(file.getPath()).toUri().getPath();
    String[][] hints = kfsImpl.getDataLocation(srep, start, len);
    if (hints == null) {
        return null;
    }
    BlockLocation[] result = new BlockLocation[hints.length];
    long blockSize = getDefaultBlockSize();
    long length = len;
    long blockStart = start;
    for (int i = 0; i < result.length; ++i) {
        result[i] = new BlockLocation(null, hints[i], blockStart, length < blockSize ? length : blockSize);
        blockStart += blockSize;
        length -= blockSize;
    }
    return result;
}

22. TestPlacementMonitor#createBlockInfo()

Project: hadoop-20
Source File: TestPlacementMonitor.java
View license
private BlockInfo createBlockInfo(FileStatus stat, LocatedBlock b) {
    DatanodeInfo[] locations = b.getLocations();
    String[] hosts = new String[locations.length];
    String[] names = new String[locations.length];
    for (int i = 0; i < locations.length; ++i) {
        DatanodeInfo d = locations[i];
        hosts[i] = d.getHost();
        names[i] = d.getName();
    }
    BlockLocation loc = new BlockLocation(names, hosts, b.getStartOffset(), b.getBlockSize());
    return new BlockInfo(loc, stat);
}

23. TestBlockCopier#printFileLocations()

Project: hadoop-20
Source File: TestBlockCopier.java
View license
private void printFileLocations(FileStatus file) throws IOException {
    System.out.println(file.getPath() + " block locations:");
    BlockLocation[] locations = fileSys.getFileBlockLocations(file, 0, file.getLen());
    for (int idx = 0; idx < locations.length; idx++) {
        String[] loc = locations[idx].getNames();
        System.out.print("Block[" + idx + "] : ");
        for (int j = 0; j < loc.length; j++) {
            System.out.print(loc[j] + " ");
        }
        System.out.println();
    }
}

24. TestBlockUnderConstruction#writeFile()

View license
void writeFile(Path file, FSDataOutputStream stm, int size) throws IOException {
    long blocksBefore = stm.getPos() / BLOCK_SIZE;
    TestFileCreation.writeFile(stm, BLOCK_SIZE);
    int blocksAfter = 0;
    // wait until the block is allocated by DataStreamer
    BlockLocation[] locatedBlocks;
    while (blocksAfter <= blocksBefore) {
        locatedBlocks = hdfs.getClient().getBlockLocations(file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
        blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
    }
}

25. TestDecommission#printFileLocations()

Project: hadoop-hdfs
Source File: TestDecommission.java
View license
private void printFileLocations(FileSystem fileSys, Path name) throws IOException {
    BlockLocation[] locations = fileSys.getFileBlockLocations(fileSys.getFileStatus(name), 0, fileSize);
    for (int idx = 0; idx < locations.length; idx++) {
        String[] loc = locations[idx].getHosts();
        System.out.print("Block[" + idx + "] : ");
        for (int j = 0; j < loc.length; j++) {
            System.out.print(loc[j] + " ");
        }
        System.out.println("");
    }
}

26. TestFileCreation#checkFullFile()

Project: hadoop-hdfs
Source File: TestFileCreation.java
View license
static void checkFullFile(FileSystem fs, Path name) throws IOException {
    FileStatus stat = fs.getFileStatus(name);
    BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, fileSize);
    for (int idx = 0; idx < locations.length; idx++) {
        String[] hosts = locations[idx].getNames();
        for (int i = 0; i < hosts.length; i++) {
            System.out.print(hosts[i] + " ");
        }
        System.out.println(" off " + locations[idx].getOffset() + " len " + locations[idx].getLength());
    }
    byte[] expected = AppendTestUtil.randomBytes(seed, fileSize);
    FSDataInputStream stm = fs.open(name);
    byte[] actual = new byte[fileSize];
    stm.readFully(0, actual);
    checkData(actual, 0, expected, "Read 2");
    stm.close();
}

27. TestSmallBlock#checkFile()

Project: hadoop-hdfs
Source File: TestSmallBlock.java
View license
private void checkFile(FileSystem fileSys, Path name) throws IOException {
    BlockLocation[] locations = fileSys.getFileBlockLocations(fileSys.getFileStatus(name), 0, fileSize);
    assertEquals("Number of blocks", fileSize, locations.length);
    FSDataInputStream stm = fileSys.open(name);
    byte[] expected = new byte[fileSize];
    if (simulatedStorage) {
        for (int i = 0; i < expected.length; ++i) {
            expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
        }
    } else {
        Random rand = new Random(seed);
        rand.nextBytes(expected);
    }
    // do a sanity check. Read the file
    byte[] actual = new byte[fileSize];
    stm.readFully(0, actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
    stm.close();
}

28. TestAffinityCalculator#buildBlockLocations2()

Project: drill
Source File: TestAffinityCalculator.java
View license
public BlockLocation[] buildBlockLocations2(String[] hosts, long blockSize) {
    String[] names = new String[hosts.length];
    for (int i = 0; i < hosts.length; i++) {
        hosts[i] = "host" + i;
        names[i] = "host:" + port;
    }
    BlockLocation[] blockLocations = new BlockLocation[4];
    blockLocations[0] = new BlockLocation(new String[] { names[0] }, new String[] { hosts[0] }, 0, blockSize);
    blockLocations[1] = new BlockLocation(new String[] { names[1] }, new String[] { hosts[1] }, blockSize, blockSize);
    blockLocations[3] = new BlockLocation(new String[] { names[3] }, new String[] { hosts[3] }, blockSize * 2, blockSize);
    blockLocations[2] = new BlockLocation(new String[] { names[2] }, new String[] { hosts[2] }, blockSize * 3, blockSize);
    return blockLocations;
}

29. TestAffinityCalculator#buildBlockLocations()

Project: drill
Source File: TestAffinityCalculator.java
View license
public BlockLocation[] buildBlockLocations(String[] hosts, long blockSize) {
    String[] names = new String[hosts.length];
    for (int i = 0; i < hosts.length; i++) {
        hosts[i] = "host" + i;
        names[i] = "host:" + port;
    }
    BlockLocation[] blockLocations = new BlockLocation[3];
    blockLocations[0] = new BlockLocation(new String[] { names[0], names[1], names[2] }, new String[] { hosts[0], hosts[1], hosts[2] }, 0, blockSize);
    blockLocations[1] = new BlockLocation(new String[] { names[0], names[2], names[3] }, new String[] { hosts[0], hosts[2], hosts[3] }, blockSize, blockSize);
    blockLocations[2] = new BlockLocation(new String[] { names[0], names[1], names[3] }, new String[] { hosts[0], hosts[1], hosts[3] }, blockSize * 2, blockSize);
    return blockLocations;
}

30. FileInputFormat#getBlockIndex()

View license
protected int getBlockIndex(BlockLocation[] blkLocations, long offset) {
    for (int i = 0; i < blkLocations.length; i++) {
        // is the offset inside this block?
        if ((blkLocations[i].getOffset() <= offset) && (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())) {
            return i;
        }
    }
    BlockLocation last = blkLocations[blkLocations.length - 1];
    long fileLength = last.getOffset() + last.getLength() - 1;
    throw new IllegalArgumentException("Offset " + offset + " is outside of file (0.." + fileLength + ")");
}

31. FileInputFormat#getBlockIndex()

View license
protected int getBlockIndex(BlockLocation[] blkLocations, long offset) {
    for (int i = 0; i < blkLocations.length; i++) {
        // is the offset inside this block?
        if ((blkLocations[i].getOffset() <= offset) && (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())) {
            return i;
        }
    }
    BlockLocation last = blkLocations[blkLocations.length - 1];
    long fileLength = last.getOffset() + last.getLength() - 1;
    throw new IllegalArgumentException("Offset " + offset + " is outside of file (0.." + fileLength + ")");
}

32. IgniteHadoopFileSystemAbstractSelfTest#testGetFileBlockLocationsIfFileStatusReferenceNotExistingPath()

View license
/** @throws Exception If failed. */
public void testGetFileBlockLocationsIfFileStatusReferenceNotExistingPath() throws Exception {
    Path path = new Path("someFile");
    fs.create(path).close();
    final FileStatus status = fs.getFileStatus(path);
    fs.delete(path, true);
    BlockLocation[] locations = fs.getFileBlockLocations(status, 1, 2);
    assertEquals(0, locations.length);
}

33. RaidNode#doFileRaid()

Project: hadoop-20
Source File: RaidNode.java
View license
/**
   * RAID an individual file
   * @throws InterruptedException 
   */
private static LOGRESULTS doFileRaid(Configuration conf, EncodingCandidate ec, Path destPath, Codec codec, Statistics statistics, Progressable reporter, boolean doSimulate, int targetRepl, int metaRepl) throws IOException, InterruptedException {
    FileStatus stat = ec.srcStat;
    Path p = stat.getPath();
    FileSystem srcFs = p.getFileSystem(conf);
    // extract block locations from File system
    BlockLocation[] locations = srcFs.getFileBlockLocations(stat, 0, stat.getLen());
    // if the file has fewer than 2 blocks, then nothing to do
    if (locations.length <= 2) {
        return LOGRESULTS.NOACTION;
    }
    // add up the raw disk space occupied by this file
    long diskSpace = 0;
    for (BlockLocation l : locations) {
        diskSpace += (l.getLength() * stat.getReplication());
    }
    statistics.numProcessedBlocks += locations.length;
    statistics.processedSize += diskSpace;
    // generate parity file
    boolean parityGenerated = generateParityFile(conf, ec, targetRepl, reporter, srcFs, destPath, codec, locations.length, stat.getReplication(), metaRepl, stat.getBlockSize(), null);
    if (!parityGenerated) {
        return LOGRESULTS.NOACTION;
    }
    if (!doSimulate) {
        if (srcFs.setReplication(p, (short) targetRepl) == false) {
            LOG.info("Error in reducing replication of " + p + " to " + targetRepl);
            statistics.remainingSize += diskSpace;
            return LOGRESULTS.FAILURE;
        }
        ;
    }
    diskSpace = 0;
    for (BlockLocation l : locations) {
        diskSpace += (l.getLength() * targetRepl);
    }
    statistics.remainingSize += diskSpace;
    // the metafile will have this many number of blocks
    int numMeta = locations.length / codec.stripeLength;
    if (locations.length % codec.stripeLength != 0) {
        numMeta++;
    }
    // we create numMeta for every file. This metablock has metaRepl # replicas.
    // the last block of the metafile might not be completely filled up, but we
    // ignore that for now.
    statistics.numMetaBlocks += (numMeta * metaRepl);
    statistics.metaSize += (numMeta * metaRepl * stat.getBlockSize());
    return LOGRESULTS.SUCCESS;
}
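
The disk-space bookkeeping above is block length multiplied by the replication factor, summed over the file's blocks. A standalone sketch of that accounting (a hypothetical helper, not part of RaidNode):

// Raw bytes the file occupies across the cluster: sum of block lengths times replication.
static long rawDiskSpace(FileSystem fs, FileStatus stat) throws IOException {
    long total = 0;
    for (BlockLocation block : fs.getFileBlockLocations(stat, 0, stat.getLen())) {
        total += block.getLength() * stat.getReplication();
    }
    return total;
}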

34. BlockMap#computeBlocks()

Project: asakusafw
Source File: BlockMap.java
View license
/**
     * Returns a list of {@link BlockInfo} for the target file.
     * @param fs the target file
     * @param status the target file status
     * @return the computed information
     * @throws IOException if failed to compute information
     */
public static List<BlockInfo> computeBlocks(FileSystem fs, FileStatus status) throws IOException {
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
    List<BlockInfo> results = new ArrayList<>();
    for (BlockLocation location : locations) {
        long length = location.getLength();
        long start = location.getOffset();
        results.add(new BlockInfo(start, start + length, location.getHosts()));
    }
    return results;
}

35. TestDFSUtil#testLocatedBlocks2Locations()

Project: hadoop-20
Source File: TestDFSUtil.java
View license
/**
   * Test conversion of LocatedBlock to BlockLocation
   */
@Test
public void testLocatedBlocks2Locations() {
    DatanodeInfo d = new DatanodeInfo();
    DatanodeInfo[] ds = new DatanodeInfo[1];
    ds[0] = d;
    // ok
    Block b1 = new Block(1, 1, 1);
    LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);
    // corrupt
    Block b2 = new Block(2, 1, 1);
    LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);
    List<LocatedBlock> ls = Arrays.asList(l1, l2);
    LocatedBlocks lbs = new LocatedBlocks(10, ls, false);
    BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
    assertTrue("expected 2 blocks but got " + bs.length, bs.length == 2);
    int corruptCount = 0;
    for (BlockLocation b : bs) {
        if (b.isCorrupt()) {
            corruptCount++;
        }
    }
    assertTrue("expected 1 corrupt files but got " + corruptCount, corruptCount == 1);
    // test an empty location
    bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
    assertEquals(0, bs.length);
}

36. RaidUtils#checkParityBlocks()

Project: hadoop-20
Source File: RaidUtils.java
View license
/**
   * checks the parity blocks for a given file and modifies
   * corruptBlocksPerStripe accordingly
   */
private static void checkParityBlocks(final Path filePath, final Map<Integer, Integer> corruptBlocksPerStripe, final long blockSize, final long startStripeIdx, final long endStripeIdx, final long numStripes, final RaidInfo raidInfo) throws IOException {
    // get the blocks of the parity file
    // because of har, multiple blocks may be returned as one container block
    BlockLocation[] containerBlocks = getParityBlocks(filePath, blockSize, numStripes, raidInfo);
    long parityStripeLength = blockSize * ((long) raidInfo.parityBlocksPerStripe);
    long parityBlocksFound = 0L;
    for (BlockLocation cb : containerBlocks) {
        if (cb.getLength() % blockSize != 0) {
            throw new IOException("container block size is not " + "multiple of parity block size");
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("found container with offset " + cb.getOffset() + ", length " + cb.getLength());
        }
        for (long offset = cb.getOffset(); offset < cb.getOffset() + cb.getLength(); offset += blockSize) {
            long block = offset / blockSize;
            int stripe = (int) (offset / parityStripeLength);
            if (stripe < 0) {
                // before the beginning of the parity file
                continue;
            }
            if (stripe >= numStripes) {
                // past the end of the parity file
                break;
            }
            parityBlocksFound++;
            if (stripe < startStripeIdx || stripe >= endStripeIdx) {
                continue;
            }
            if (isBlockCorrupt(cb)) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("parity file for " + filePath.toString() + " corrupt in block " + block + ", stripe " + stripe + "/" + numStripes);
                }
                incCorruptBlocksPerStripe(corruptBlocksPerStripe, stripe);
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("parity file for " + filePath.toString() + " OK in block " + block + ", stripe " + stripe + "/" + numStripes);
                }
            }
        }
    }
    long parityBlocksExpected = raidInfo.parityBlocksPerStripe * numStripes;
    if (parityBlocksFound != parityBlocksExpected) {
        throw new IOException("expected " + parityBlocksExpected + " parity blocks but got " + parityBlocksFound);
    }
}

37. RaidUtils#collectDirectoryCorruptBlocksInStripe()

Project: hadoop-20
Source File: RaidUtils.java
View license
public static void collectDirectoryCorruptBlocksInStripe(final Configuration conf, final DistributedFileSystem dfs, final RaidInfo raidInfo, final FileStatus fileStatus, Map<Integer, Integer> corruptBlocksPerStripe) throws IOException {
    final int stripeSize = raidInfo.codec.stripeLength;
    final Path filePath = fileStatus.getPath();
    final BlockLocation[] fileBlocks = dfs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
    LocationPair lp = StripeReader.getBlockLocation(raidInfo.codec, dfs, filePath, 0, conf, raidInfo.parityPair.getListFileStatus());
    int startBlockIdx = lp.getStripeIdx() * stripeSize + lp.getBlockIdxInStripe();
    int startStripeIdx = lp.getStripeIdx();
    long endStripeIdx = RaidNode.numStripes((long) (startBlockIdx + fileBlocks.length), stripeSize);
    long blockSize = DirectoryStripeReader.getParityBlockSize(conf, lp.getListFileStatus());
    long numBlocks = DirectoryStripeReader.getBlockNum(lp.getListFileStatus());
    HashMap<Integer, Integer> allCorruptBlocksPerStripe = new HashMap<Integer, Integer>();
    checkParityBlocks(filePath, allCorruptBlocksPerStripe, blockSize, startStripeIdx, endStripeIdx, RaidNode.numStripes(numBlocks, stripeSize), raidInfo);
    DirectoryStripeReader sReader = new DirectoryStripeReader(conf, raidInfo.codec, dfs, lp.getStripeIdx(), -1L, filePath.getParent(), lp.getListFileStatus());
    // Get the corrupt block information for all stripes related to the file
    while (sReader.getCurrentStripeIdx() < endStripeIdx) {
        int stripe = (int) sReader.getCurrentStripeIdx();
        BlockLocation[] bls = sReader.getNextStripeBlockLocations();
        for (BlockLocation bl : bls) {
            if (isBlockCorrupt(bl)) {
                incCorruptBlocksPerStripe(allCorruptBlocksPerStripe, stripe);
            }
        }
    }
    // figure out which stripes these corrupted blocks belong to
    for (BlockLocation fileBlock : fileBlocks) {
        int blockNo = startBlockIdx + (int) (fileBlock.getOffset() / fileStatus.getBlockSize());
        final int stripe = blockNo / stripeSize;
        if (isBlockCorrupt(fileBlock)) {
            corruptBlocksPerStripe.put(stripe, allCorruptBlocksPerStripe.get(stripe));
            if (LOG.isDebugEnabled()) {
                LOG.debug("file " + filePath.toString() + " corrupt in block " + blockNo + ", stripe " + stripe);
            }
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("file " + filePath.toString() + " OK in block " + blockNo + ", stripe " + stripe);
            }
        }
    }
}

38. RaidUtils#collectFileCorruptBlocksInStripe()

Project: hadoop-20
Source File: RaidUtils.java
View license
public static void collectFileCorruptBlocksInStripe(final DistributedFileSystem dfs, final RaidInfo raidInfo, final FileStatus fileStatus, final Map<Integer, Integer> corruptBlocksPerStripe) throws IOException {
    // read conf
    final int stripeBlocks = raidInfo.codec.stripeLength;
    // figure out which blocks are missing/corrupted
    final Path filePath = fileStatus.getPath();
    final long blockSize = fileStatus.getBlockSize();
    final long fileLength = fileStatus.getLen();
    final long fileLengthInBlocks = RaidNode.numBlocks(fileStatus);
    final long fileStripes = RaidNode.numStripes(fileLengthInBlocks, stripeBlocks);
    final BlockLocation[] fileBlocks = dfs.getFileBlockLocations(fileStatus, 0, fileLength);
    // figure out which stripes these corrupted blocks belong to
    for (BlockLocation fileBlock : fileBlocks) {
        int blockNo = (int) (fileBlock.getOffset() / blockSize);
        final int stripe = blockNo / stripeBlocks;
        if (isBlockCorrupt(fileBlock)) {
            incCorruptBlocksPerStripe(corruptBlocksPerStripe, stripe);
            if (LOG.isDebugEnabled()) {
                LOG.debug("file " + filePath.toString() + " corrupt in block " + blockNo + "/" + fileLengthInBlocks + ", stripe " + stripe + "/" + fileStripes);
            }
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("file " + filePath.toString() + " OK in block " + blockNo + "/" + fileLengthInBlocks + ", stripe " + stripe + "/" + fileStripes);
            }
        }
    }
    checkParityBlocks(filePath, corruptBlocksPerStripe, blockSize, 0, fileStripes, fileStripes, raidInfo);
}

39. IgniteHadoopFileSystemAbstractSelfTest#testGetFileBlockLocations()

View license
/** @throws Exception If failed. */
public void testGetFileBlockLocations() throws Exception {
    Path igfsHome = new Path(PRIMARY_URI);
    Path file = new Path(igfsHome, "someFile");
    try (OutputStream out = new BufferedOutputStream(fs.create(file, true, 1024 * 1024))) {
        byte[] data = new byte[128 * 1024];
        for (int i = 0; i < 100; i++) out.write(data);
        out.flush();
    }
    try (FSDataInputStream in = fs.open(file, 1024 * 1024)) {
        byte[] data = new byte[128 * 1024];
        int read;
        do {
            read = in.read(data);
        } while (read > 0);
    }
    FileStatus status = fs.getFileStatus(file);
    int grpLen = 128 * 512 * 1024;
    int grpCnt = (int) ((status.getLen() + grpLen - 1) / grpLen);
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
    assertEquals(grpCnt, locations.length);
}

40. TestGetSplitHosts#testGetSplitHosts()

Project: hadoop-20
Source File: TestGetSplitHosts.java
View license
public void testGetSplitHosts() throws Exception {
    int numBlocks = 3;
    int block1Size = 100, block2Size = 150, block3Size = 75;
    int fileSize = block1Size + block2Size + block3Size;
    int replicationFactor = 3;
    NetworkTopology clusterMap = new NetworkTopology();
    BlockLocation[] bs = new BlockLocation[numBlocks];
    String[] block1Hosts = { "host1", "host2", "host3" };
    String[] block1Names = { "host1:100", "host2:100", "host3:100" };
    String[] block1Racks = { "/rack1/", "/rack1/", "/rack2/" };
    String[] block1Paths = new String[replicationFactor];
    for (int i = 0; i < replicationFactor; i++) {
        block1Paths[i] = block1Racks[i] + block1Names[i];
    }
    bs[0] = new BlockLocation(block1Names, block1Hosts, block1Paths, 0, block1Size);
    String[] block2Hosts = { "host4", "host5", "host6" };
    String[] block2Names = { "host4:100", "host5:100", "host6:100" };
    String[] block2Racks = { "/rack2/", "/rack3/", "/rack3/" };
    String[] block2Paths = new String[replicationFactor];
    for (int i = 0; i < replicationFactor; i++) {
        block2Paths[i] = block2Racks[i] + block2Names[i];
    }
    bs[1] = new BlockLocation(block2Names, block2Hosts, block2Paths, block1Size, block2Size);
    String[] block3Hosts = { "host1", "host7", "host8" };
    String[] block3Names = { "host1:100", "host7:100", "host8:100" };
    String[] block3Racks = { "/rack1/", "/rack4/", "/rack4/" };
    String[] block3Paths = new String[replicationFactor];
    for (int i = 0; i < replicationFactor; i++) {
        block3Paths[i] = block3Racks[i] + block3Names[i];
    }
    bs[2] = new BlockLocation(block3Names, block3Hosts, block3Paths, block1Size + block2Size, block3Size);
    SequenceFileInputFormat<String, String> sif = new SequenceFileInputFormat<String, String>();
    String[] hosts = sif.getSplitHosts(bs, 0, fileSize, clusterMap);
    // Contributions By Racks are
    // Rack1   175       
    // Rack2   275       
    // Rack3   150       
    // So, Rack2 hosts, host4 and host 3 should be returned
    // even if their individual contribution is not the highest
    assertTrue(hosts.length == replicationFactor);
    assertTrue(hosts[0].equalsIgnoreCase("host4"));
    assertTrue(hosts[1].equalsIgnoreCase("host3"));
    assertTrue(hosts[2].equalsIgnoreCase("host1"));
    // Now Create the blocks without topology information
    bs[0] = new BlockLocation(block1Names, block1Hosts, 0, block1Size);
    bs[1] = new BlockLocation(block2Names, block2Hosts, block1Size, block2Size);
    bs[2] = new BlockLocation(block3Names, block3Hosts, block1Size + block2Size, block3Size);
    hosts = sif.getSplitHosts(bs, 0, fileSize, clusterMap);
    // host1 makes the highest contribution among all hosts
    // So, that should be returned before others
    assertTrue(hosts.length == replicationFactor);
    assertTrue(hosts[0].equalsIgnoreCase("host1"));
}

41. AegisthusInputFormat#getSSTableSplitsForFile()

Project: aegisthus
Source File: AegisthusInputFormat.java
View license
/**
     * The main thing that addSSTableSplit handles is splitting SSTables
     * using their index if available. The general algorithm: if the file
     * is larger than the block size plus some fuzzy factor, it is split at offsets taken from the index.
     */
List<InputSplit> getSSTableSplitsForFile(JobContext job, FileStatus file) throws IOException {
    long length = file.getLen();
    if (length == 0) {
        LOG.info("skipping zero length file: {}", file.getPath());
        return Collections.emptyList();
    }
    Path path = file.getPath();
    FileSystem fs = path.getFileSystem(job.getConfiguration());
    BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length);
    Path compressionPath = new Path(path.getParent(), path.getName().replaceAll("-Data.db", "-CompressionInfo.db"));
    if (fs.exists(compressionPath)) {
        return ImmutableList.of((InputSplit) AegCompressedSplit.createAegCompressedSplit(path, 0, length, blkLocations[blkLocations.length - 1].getHosts(), compressionPath));
    }
    long blockSize = file.getBlockSize();
    String aegisthusBlockSize = job.getConfiguration().get(Aegisthus.Feature.CONF_BLOCKSIZE);
    if (!Strings.isNullOrEmpty(aegisthusBlockSize)) {
        blockSize = Long.valueOf(aegisthusBlockSize);
    }
    long maxSplitSize = (long) (blockSize * .99);
    long fuzzySplit = (long) (blockSize * 1.2);
    long bytesRemaining = length;
    List<InputSplit> splits = Lists.newArrayList();
    IndexDatabaseScanner scanner = null;
    // Only initialize if we are going to have more than a single split
    if (fuzzySplit < length) {
        Path indexPath = new Path(path.getParent(), path.getName().replaceAll("-Data.db", "-Index.db"));
        if (!fs.exists(indexPath)) {
            fuzzySplit = length;
        } else {
            FSDataInputStream fileIn = fs.open(indexPath);
            scanner = new IndexDatabaseScanner(new BufferedInputStream(fileIn));
        }
    }
    long splitStart = 0;
    while (splitStart + fuzzySplit < length && scanner != null && scanner.hasNext()) {
        long splitSize = 0;
        // The scanner returns an offset from the start of the file.
        while (splitSize < maxSplitSize && scanner.hasNext()) {
            IndexDatabaseScanner.OffsetInfo offsetInfo = scanner.next();
            splitSize = offsetInfo.getDataFileOffset() - splitStart;
        }
        int blkIndex = getBlockIndex(blkLocations, splitStart + (splitSize / 2));
        LOG.debug("split path: {}:{}:{}", path.getName(), splitStart, splitSize);
        splits.add(AegSplit.createSplit(path, splitStart, splitSize, blkLocations[blkIndex].getHosts()));
        bytesRemaining -= splitSize;
        splitStart += splitSize;
    }
    if (scanner != null) {
        scanner.close();
    }
    if (bytesRemaining != 0) {
        LOG.debug("end path: {}:{}:{}", path.getName(), length - bytesRemaining, bytesRemaining);
        splits.add(AegSplit.createSplit(path, length - bytesRemaining, bytesRemaining, blkLocations[blkLocations.length - 1].getHosts()));
    }
    return splits;
}

42. HadoopIgfs20FileSystemAbstractSelfTest#testGetFileBlockLocations()

View license
/** @throws Exception If failed. */
public void testGetFileBlockLocations() throws Exception {
    Path igfsHome = new Path(primaryFsUri);
    Path file = new Path(igfsHome, "someFile");
    try (OutputStream out = new BufferedOutputStream(fs.create(file, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault())))) {
        byte[] data = new byte[128 * 1024];
        for (int i = 0; i < 100; i++) out.write(data);
        out.flush();
    }
    try (FSDataInputStream in = fs.open(file, 1024 * 1024)) {
        byte[] data = new byte[128 * 1024];
        int read;
        do {
            read = in.read(data);
        } while (read > 0);
    }
    FileStatus status = fs.getFileStatus(file);
    int grpLen = 128 * 512 * 1024;
    int grpCnt = (int) ((status.getLen() + grpLen - 1) / grpLen);
    BlockLocation[] locations = fs.getFileBlockLocations(file, 0, status.getLen());
    assertEquals(grpCnt, locations.length);
}

43. CassandraFileSystemThriftStore#getBlockLocation()

View license
public BlockLocation[] getBlockLocation(List<Block> blocks, long start, long len) throws IOException {
    if (blocks.isEmpty())
        return null;
    List<ByteBuffer> blockKeys = new ArrayList<ByteBuffer>(blocks.size());
    for (Block b : blocks) blockKeys.add(uuidToByteBuffer(b.id));
    BlockLocation[] locations = new BlockLocation[blocks.size()];
    try {
        List<List<String>> blockEndpoints = ((Brisk.Iface) client).describe_keys(keySpace, blockKeys);
        for (int i = 0; i < blockEndpoints.size(); i++) {
            List<String> endpoints = blockEndpoints.get(i);
            Block b = blocks.get(i);
            long offset = (i == 0 && b.offset > start) ? start : b.offset;
            // TODO: Add topology info if at all possible?
            locations[i] = new BlockLocation(null, endpoints.toArray(new String[0]), offset, b.length);
        }
        return locations;
    } catch (Exception e) {
        throw new IOException(e);
    }
}

44. TestGetSplitHosts#testGetSplitHosts()

View license
public void testGetSplitHosts() throws Exception {
    int numBlocks = 3;
    int block1Size = 100, block2Size = 150, block3Size = 75;
    int fileSize = block1Size + block2Size + block3Size;
    int replicationFactor = 3;
    NetworkTopology clusterMap = new NetworkTopology();
    BlockLocation[] bs = new BlockLocation[numBlocks];
    String[] block1Hosts = { "host1", "host2", "host3" };
    String[] block1Names = { "host1:100", "host2:100", "host3:100" };
    String[] block1Racks = { "/rack1/", "/rack1/", "/rack2/" };
    String[] block1Paths = new String[replicationFactor];
    for (int i = 0; i < replicationFactor; i++) {
        block1Paths[i] = block1Racks[i] + block1Names[i];
    }
    bs[0] = new BlockLocation(block1Names, block1Hosts, block1Paths, 0, block1Size);
    String[] block2Hosts = { "host4", "host5", "host6" };
    String[] block2Names = { "host4:100", "host5:100", "host6:100" };
    String[] block2Racks = { "/rack2/", "/rack3/", "/rack3/" };
    String[] block2Paths = new String[replicationFactor];
    for (int i = 0; i < replicationFactor; i++) {
        block2Paths[i] = block2Racks[i] + block2Names[i];
    }
    bs[1] = new BlockLocation(block2Names, block2Hosts, block2Paths, block1Size, block2Size);
    String[] block3Hosts = { "host1", "host7", "host8" };
    String[] block3Names = { "host1:100", "host7:100", "host8:100" };
    String[] block3Racks = { "/rack1/", "/rack4/", "/rack4/" };
    String[] block3Paths = new String[replicationFactor];
    for (int i = 0; i < replicationFactor; i++) {
        block3Paths[i] = block3Racks[i] + block3Names[i];
    }
    bs[2] = new BlockLocation(block3Names, block3Hosts, block3Paths, block1Size + block2Size, block3Size);
    SequenceFileInputFormat<String, String> sif = new SequenceFileInputFormat<String, String>();
    String[] hosts = sif.getSplitHosts(bs, 0, fileSize, clusterMap);
    // Contributions By Racks are
    // Rack1   175       
    // Rack2   275       
    // Rack3   150       
    // So, Rack2 hosts, host4 and host 3 should be returned
    // even if their individual contribution is not the highest
    assertTrue(hosts.length == replicationFactor);
    assertTrue(hosts[0].equalsIgnoreCase("host4"));
    assertTrue(hosts[1].equalsIgnoreCase("host3"));
    assertTrue(hosts[2].equalsIgnoreCase("host1"));
    // Now Create the blocks without topology information
    bs[0] = new BlockLocation(block1Names, block1Hosts, 0, block1Size);
    bs[1] = new BlockLocation(block2Names, block2Hosts, block1Size, block2Size);
    bs[2] = new BlockLocation(block3Names, block3Hosts, block1Size + block2Size, block3Size);
    hosts = sif.getSplitHosts(bs, 0, fileSize, clusterMap);
    // host1 makes the highest contribution among all hosts
    // So, that should be returned before others
    assertTrue(hosts.length == replicationFactor);
    assertTrue(hosts[0].equalsIgnoreCase("host1"));
}

45. CassandraFileSystemTest#testFileSystem()

Project: brisk
Source File: CassandraFileSystemTest.java
View license
private void testFileSystem(boolean flush) throws Exception {
    CassandraFileSystem fs = new CassandraFileSystem();
    fs.initialize(URI.create("cfs://localhost:" + DatabaseDescriptor.getRpcPort() + "/"), new Configuration());
    fs.mkdirs(new Path("/mytestdir"));
    fs.mkdirs(new Path("/mytestdir/sub1"));
    fs.mkdirs(new Path("/mytestdir/sub2"));
    fs.mkdirs(new Path("/mytestdir/sub3"));
    fs.mkdirs(new Path("/mytestdir/sub3/sub4"));
    //Create a 1MB file to sent to fs
    File tmp = File.createTempFile("testcfs", "input");
    Writer writer = new FileWriter(tmp);
    char buf[] = new char[1024];
    fillArray(buf);
    for (int i = 0; i < 1024; i++) writer.write(buf);
    writer.close();
    tmp.deleteOnExit();
    //Write file
    fs.copyFromLocalFile(new Path("file://" + tmp.getAbsolutePath()), new Path("/mytestdir/testfile"));
    if (flush) {
        List<Future<?>> cb = Table.open("cfs").flush();
        for (Future c : cb) c.get();
    }
    Set<Path> allPaths = fs.store.listDeepSubPaths(new Path("/mytestdir"));
    //Verify deep paths
    assertEquals(5, allPaths.size());
    //verify shallow path
    Set<Path> thisPath = fs.store.listSubPaths(new Path("/mytestdir"));
    assertEquals(4, thisPath.size());
    //Check file status
    FileStatus stat = fs.getFileStatus(new Path("/mytestdir/testfile"));
    assertEquals(tmp.getAbsoluteFile().length(), stat.getLen());
    assertEquals(false, stat.isDir());
    //Check block info
    BlockLocation[] info = fs.getFileBlockLocations(stat, 0, stat.getLen());
    assertEquals(1, info.length);
    assertEquals(FBUtilities.getLocalAddress().getHostName(), info[0].getHosts()[0]);
    info = fs.getFileBlockLocations(stat, 1, 10);
    assertTrue(info.length == 1);
    info = fs.getFileBlockLocations(stat, 0, 200);
    assertTrue(info.length == 1);
    //Check dir status
    stat = fs.getFileStatus(new Path("/mytestdir"));
    assertEquals(true, stat.isDir());
    //Read back the file
    File out = File.createTempFile("testcfs", "output");
    fs.copyToLocalFile(new Path("/mytestdir/testfile"), new Path("file://" + out.getAbsolutePath()));
    Reader reader = new FileReader(out);
    for (int i = 0; i < 1024; i++) {
        assertEquals(1024, reader.read(buf));
    }
    assertEquals(-1, reader.read());
    reader.close();
    out.deleteOnExit();
    // Verify the digests
    assertDigest(tmp, out);
}

46. StreamDataFileSplitter#computeSplits()

Project: cdap
Source File: StreamDataFileSplitter.java
View license
/**
   * Computes splits for the event file.
   */
<T> void computeSplits(FileSystem fs, long minSplitSize, long maxSplitSize, long startTime, long endTime, List<T> splits, StreamInputSplitFactory<T> splitFactory) throws IOException {
    // Compute the splits based on the min/max size
    Path eventFile = eventFileStatus.getPath();
    Path indexFile = getIndexFile(eventFile);
    BlockLocation[] blockLocations = fs.getFileBlockLocations(eventFile, 0, eventFileStatus.getLen());
    long length = eventFileStatus.getLen();
    long offset = 0;
    int blockIndex = 0;
    while (offset < length) {
        blockIndex = getBlockIndex(blockLocations, offset, blockIndex);
        String[] hosts = null;
        if (blockIndex >= 0) {
            hosts = blockLocations[blockIndex].getHosts();
        } else {
            blockIndex = 0;
        }
        long splitSize = computeSplitSize(eventFileStatus, offset, minSplitSize, maxSplitSize);
        splits.add(splitFactory.createSplit(eventFile, indexFile, startTime, endTime, offset, splitSize, hosts));
        offset += splitSize;
    }
    // One extra split for the tail of the file.
    splits.add(splitFactory.createSplit(eventFile, indexFile, startTime, endTime, offset, Long.MAX_VALUE, null));
}

47. DatanodeBenThread#getRunningDatanode()

Project: hadoop-20
Source File: DatanodeBenThread.java
View license
/**
     * Write a small file to figure out which datanode we are running
     */
private String getRunningDatanode(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.newInstance(conf);
    fs.mkdirs(new Path("/tmp"));
    Path fileName = new Path("/tmp", rtc.task_name + System.currentTimeMillis() + rb.nextInt());
    if (fs.exists(fileName)) {
        fs.delete(fileName);
    }
    FSDataOutputStream out = null;
    byte[] buffer = new byte[1];
    buffer[0] = '0';
    try {
        out = fs.create(fileName, (short) 1);
        out.write(buffer, 0, 1);
    } finally {
        IOUtils.closeStream(out);
    }
    fs = getDFS(fs);
    assert fs instanceof DistributedFileSystem;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    BlockLocation[] lbs = dfs.getClient().getBlockLocations(fileName.toUri().getPath(), 0, 1);
    fs.delete(fileName);
    return lbs[0].getHosts()[0];
}
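The probe above goes through the non-public DFSClient to read back the block's host. A hedged sketch of the same trick using only the public FileSystem API is shown below; whether the single replica actually lands on the local machine depends on HDFS placement (the client normally has to be a datanode itself), so the result is best treated as a hint. The class and method names are illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class LocalDatanodeProbe {

    /**
     * Writes a one-byte, single-replica marker file and returns the host that
     * received the replica, or null if no location is reported.
     */
    public static String probe(Configuration conf, Path markerFile) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        FSDataOutputStream out = null;
        try {
            // Replication factor 1 so the single replica identifies one datanode.
            out = fs.create(markerFile, (short) 1);
            out.write(new byte[] { '0' }, 0, 1);
        } finally {
            IOUtils.closeStream(out);
        }
        try {
            FileStatus stat = fs.getFileStatus(markerFile);
            BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, stat.getLen());
            if (locations.length > 0 && locations[0].getHosts().length > 0) {
                return locations[0].getHosts()[0];
            }
            return null;
        } finally {
            fs.delete(markerFile, false);
        }
    }
}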

48. TestReplication#checkFile()

Project: hadoop-hdfs
Source File: TestReplication.java
View license
/* check if at least two of the nodes are on the same rack */
private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = DFSClient.createNamenode(conf);
    waitForBlockReplication(name.toString(), namenode, Math.min(numDatanodes, repl), -1);
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0, Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L, Long.MAX_VALUE);
    // verify that rack locations match
    assertTrue(blockLocations.length == locations.locatedBlockCount());
    for (int i = 0; i < blockLocations.length; i++) {
        LocatedBlock blk = locations.get(i);
        DatanodeInfo[] datanodes = blk.getLocations();
        String[] topologyPaths = blockLocations[i].getTopologyPaths();
        assertTrue(topologyPaths.length == datanodes.length);
        for (int j = 0; j < topologyPaths.length; j++) {
            boolean found = false;
            for (int k = 0; k < racks.length; k++) {
                if (topologyPaths[j].startsWith(racks[k])) {
                    found = true;
                    break;
                }
            }
            assertTrue(found);
        }
    }
    boolean isOnSameRack = true, isNotOnSameRack = true;
    for (LocatedBlock blk : locations.getLocatedBlocks()) {
        DatanodeInfo[] datanodes = blk.getLocations();
        if (datanodes.length <= 1)
            break;
        if (datanodes.length == 2) {
            isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(datanodes[1].getNetworkLocation()));
            break;
        }
        isOnSameRack = false;
        isNotOnSameRack = false;
        for (int i = 0; i < datanodes.length - 1; i++) {
            LOG.info("datanode " + i + ": " + datanodes[i].getName());
            boolean onRack = false;
            for (int j = i + 1; j < datanodes.length; j++) {
                if (datanodes[i].getNetworkLocation().equals(datanodes[j].getNetworkLocation())) {
                    onRack = true;
                }
            }
            if (onRack) {
                isOnSameRack = true;
            }
            if (!onRack) {
                isNotOnSameRack = true;
            }
            if (isOnSameRack && isNotOnSameRack)
                break;
        }
        if (!isOnSameRack || !isNotOnSameRack)
            break;
    }
    assertTrue(isOnSameRack);
    assertTrue(isNotOnSameRack);
}
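The test cross-checks getTopologyPaths() against the namenode's DatanodeInfo list. A smaller consistency check that uses only BlockLocation accessors is sketched below; it relies on the convention, visible in the surrounding examples, that each topology path is the rack path followed by the corresponding host:port name. The class name is illustrative.

import java.io.IOException;

import org.apache.hadoop.fs.BlockLocation;

public class TopologyPathCheck {

    /**
     * Returns true if every topology path of every block ends with the
     * corresponding "host:port" name, e.g. "/rack1/host1:100" for "host1:100".
     */
    public static boolean namesMatchTopology(BlockLocation[] blocks) throws IOException {
        for (BlockLocation block : blocks) {
            String[] names = block.getNames();
            String[] paths = block.getTopologyPaths();
            if (names.length != paths.length) {
                return false;
            }
            for (int i = 0; i < names.length; i++) {
                if (!paths[i].endsWith(names[i])) {
                    return false;
                }
            }
        }
        return true;
    }
}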

49. RaidUtils#getParityBlocks()

Project: hadoop-20
Source File: RaidUtils.java
View license
/**
   * Gets the parity blocks corresponding to a file.
   * Returns the parity blocks themselves in the DFS case, and the
   * blocks of the HAR part files that contain the parity data in
   * the HAR FS case.
   */
private static BlockLocation[] getParityBlocks(final Path filePath, final long blockSize, final long numStripes, final RaidInfo raidInfo) throws IOException {
    FileSystem parityFS = raidInfo.parityPair.getFileSystem();
    // get parity file metadata
    FileStatus parityFileStatus = raidInfo.parityPair.getFileStatus();
    long parityFileLength = parityFileStatus.getLen();
    if (parityFileLength != numStripes * raidInfo.parityBlocksPerStripe * blockSize) {
        throw new IOException("expected parity file of length" + (numStripes * raidInfo.parityBlocksPerStripe * blockSize) + " but got parity file of length " + parityFileLength);
    }
    BlockLocation[] parityBlocks = parityFS.getFileBlockLocations(parityFileStatus, 0L, parityFileLength);
    if (parityFS instanceof DistributedFileSystem || parityFS instanceof DistributedRaidFileSystem) {
        long parityBlockSize = parityFileStatus.getBlockSize();
        if (parityBlockSize != blockSize) {
            throw new IOException("file block size is " + blockSize + " but parity file block size is " + parityBlockSize);
        }
    } else if (parityFS instanceof HarFileSystem) {
        LOG.debug("HAR FS found");
    } else {
        LOG.warn("parity file system is not of a supported type");
    }
    return parityBlocks;
}
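Beyond the block-size check above, a generic sanity check on any getFileBlockLocations() result is to confirm that the returned ranges add up to the file length. A small sketch, assuming the returned ranges do not overlap (which holds for DFS but is not guaranteed by the FileSystem interface); the class name is illustrative.

import java.io.IOException;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;

public class BlockCoverageCheck {

    /**
     * Throws if the block locations returned for the whole file do not
     * add up to the file length.
     */
    public static void checkCoversFile(FileSystem fs, FileStatus status) throws IOException {
        BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
        long covered = 0;
        for (BlockLocation block : blocks) {
            covered += block.getLength();
        }
        if (covered != status.getLen()) {
            throw new IOException("block locations cover " + covered
                + " bytes but file length is " + status.getLen());
        }
    }
}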

50. DFSClient#getBlockLocations()

Project: hadoop-hdfs
Source File: DFSClient.java
View license
/**
   * Get block location info about a file.
   * 
   * getBlockLocations() returns a list of hostnames that store 
   * data for a specific file region.  It returns a set of hostnames
   * for every block within the indicated region.
   *
   * This function is very useful when writing code that considers
   * data-placement when performing operations.  For example, the
   * MapReduce system tries to schedule tasks on the same machines
   * as the data-block the task processes. 
   */
public BlockLocation[] getBlockLocations(String src, long start, long length) throws IOException {
    LocatedBlocks blocks = callGetBlockLocations(namenode, src, start, length);
    if (blocks == null) {
        return new BlockLocation[0];
    }
    int nrBlocks = blocks.locatedBlockCount();
    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
    int idx = 0;
    for (LocatedBlock blk : blocks.getLocatedBlocks()) {
        assert idx < nrBlocks : "Incorrect index";
        DatanodeInfo[] locations = blk.getLocations();
        String[] hosts = new String[locations.length];
        String[] names = new String[locations.length];
        String[] racks = new String[locations.length];
        for (int hCnt = 0; hCnt < locations.length; hCnt++) {
            hosts[hCnt] = locations[hCnt].getHostName();
            names[hCnt] = locations[hCnt].getName();
            NodeBase node = new NodeBase(names[hCnt], locations[hCnt].getNetworkLocation());
            racks[hCnt] = node.toString();
        }
        blkLocations[idx] = new BlockLocation(names, hosts, racks, blk.getStartOffset(), blk.getBlockSize());
        idx++;
    }
    return blkLocations;
}
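On the caller's side, the BlockLocation[] produced here is typically scanned for candidate hosts of a byte range. A minimal sketch of that usage through the public FileSystem API rather than DFSClient directly; the class and method names are illustrative.

import java.io.IOException;
import java.util.LinkedHashSet;
import java.util.Set;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RegionHosts {

    /** Distinct hosts storing at least one block that overlaps [start, start + length). */
    public static Set<String> hostsForRegion(FileSystem fs, Path file,
                                             long start, long length) throws IOException {
        FileStatus stat = fs.getFileStatus(file);
        BlockLocation[] blocks = fs.getFileBlockLocations(stat, start, length);
        Set<String> hosts = new LinkedHashSet<String>();
        for (BlockLocation block : blocks) {
            for (String host : block.getHosts()) {
                hosts.add(host);
            }
        }
        return hosts;
    }
}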

51. TestGetSplitHosts#testGetSplitHosts()

View license
public void testGetSplitHosts() throws Exception {
    int numBlocks = 3;
    int block1Size = 100, block2Size = 150, block3Size = 75;
    int fileSize = block1Size + block2Size + block3Size;
    int replicationFactor = 3;
    NetworkTopology clusterMap = new NetworkTopology();
    BlockLocation[] bs = new BlockLocation[numBlocks];
    String[] block1Hosts = { "host1", "host2", "host3" };
    String[] block1Names = { "host1:100", "host2:100", "host3:100" };
    String[] block1Racks = { "/rack1/", "/rack1/", "/rack2/" };
    String[] block1Paths = new String[replicationFactor];
    for (int i = 0; i < replicationFactor; i++) {
        block1Paths[i] = block1Racks[i] + block1Names[i];
    }
    bs[0] = new BlockLocation(block1Names, block1Hosts, block1Paths, 0, block1Size);
    String[] block2Hosts = { "host4", "host5", "host6" };
    String[] block2Names = { "host4:100", "host5:100", "host6:100" };
    String[] block2Racks = { "/rack2/", "/rack3/", "/rack3/" };
    String[] block2Paths = new String[replicationFactor];
    for (int i = 0; i < replicationFactor; i++) {
        block2Paths[i] = block2Racks[i] + block2Names[i];
    }
    bs[1] = new BlockLocation(block2Names, block2Hosts, block2Paths, block1Size, block2Size);
    String[] block3Hosts = { "host1", "host7", "host8" };
    String[] block3Names = { "host1:100", "host7:100", "host8:100" };
    String[] block3Racks = { "/rack1/", "/rack4/", "/rack4/" };
    String[] block3Paths = new String[replicationFactor];
    for (int i = 0; i < replicationFactor; i++) {
        block3Paths[i] = block3Racks[i] + block3Names[i];
    }
    bs[2] = new BlockLocation(block3Names, block3Hosts, block3Paths, block1Size + block2Size, block3Size);
    SequenceFileInputFormat<String, String> sif = new SequenceFileInputFormat<String, String>();
    String[] hosts = sif.getSplitHosts(bs, 0, fileSize, clusterMap);
    // Contributions By Racks are
    // Rack1   175       
    // Rack2   275       
    // Rack3   150       
    // So, Rack2 hosts, host4 and host3, should be returned
    // even if their individual contribution is not the highest
    assertTrue(hosts.length == replicationFactor);
    assertTrue(hosts[0].equalsIgnoreCase("host4"));
    assertTrue(hosts[1].equalsIgnoreCase("host3"));
    assertTrue(hosts[2].equalsIgnoreCase("host1"));
    // Now Create the blocks without topology information
    bs[0] = new BlockLocation(block1Names, block1Hosts, 0, block1Size);
    bs[1] = new BlockLocation(block2Names, block2Hosts, block1Size, block2Size);
    bs[2] = new BlockLocation(block3Names, block3Hosts, block1Size + block2Size, block3Size);
    hosts = sif.getSplitHosts(bs, 0, fileSize, clusterMap);
    // host1 makes the highest contribution among all hosts
    // So, that should be returned before others
    assertTrue(hosts.length == replicationFactor);
    assertTrue(hosts[0].equalsIgnoreCase("host1"));
}
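The test exercises both BlockLocation constructors, with and without topology paths. A tiny standalone sketch of the same construction and read-back; the values are made up for illustration.

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.fs.BlockLocation;

public class BlockLocationConstruction {

    public static void main(String[] args) throws IOException {
        String[] names = { "host1:100", "host2:100" };
        String[] hosts = { "host1", "host2" };
        String[] topologyPaths = { "/rack1/host1:100", "/rack2/host2:100" };

        // With topology information: rack paths are available to callers.
        BlockLocation withTopology =
            new BlockLocation(names, hosts, topologyPaths, 0L, 128L);
        System.out.println(Arrays.toString(withTopology.getTopologyPaths()));

        // Without topology information, as in the second half of the test above.
        BlockLocation withoutTopology = new BlockLocation(names, hosts, 0L, 128L);
        System.out.println(Arrays.toString(withoutTopology.getHosts()));
    }
}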

52. TestReplication#checkFile()

Project: hadoop-common
Source File: TestReplication.java
View license
/* check if at least two of the nodes are on the same rack */
private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = DFSClient.createNamenode(conf);
    waitForBlockReplication(name.toString(), namenode, Math.min(numDatanodes, repl), -1);
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0, Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L, Long.MAX_VALUE);
    // verify that rack locations match
    assertTrue(blockLocations.length == locations.locatedBlockCount());
    for (int i = 0; i < blockLocations.length; i++) {
        LocatedBlock blk = locations.get(i);
        DatanodeInfo[] datanodes = blk.getLocations();
        String[] topologyPaths = blockLocations[i].getTopologyPaths();
        assertTrue(topologyPaths.length == datanodes.length);
        for (int j = 0; j < topologyPaths.length; j++) {
            boolean found = false;
            for (int k = 0; k < racks.length; k++) {
                if (topologyPaths[j].startsWith(racks[k])) {
                    found = true;
                    break;
                }
            }
            assertTrue(found);
        }
    }
    boolean isOnSameRack = true, isNotOnSameRack = true;
    for (LocatedBlock blk : locations.getLocatedBlocks()) {
        DatanodeInfo[] datanodes = blk.getLocations();
        if (datanodes.length <= 1)
            break;
        if (datanodes.length == 2) {
            isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(datanodes[1].getNetworkLocation()));
            break;
        }
        isOnSameRack = false;
        isNotOnSameRack = false;
        for (int i = 0; i < datanodes.length - 1; i++) {
            LOG.info("datanode " + i + ": " + datanodes[i].getName());
            boolean onRack = false;
            for (int j = i + 1; j < datanodes.length; j++) {
                if (datanodes[i].getNetworkLocation().equals(datanodes[j].getNetworkLocation())) {
                    onRack = true;
                }
            }
            if (onRack) {
                isOnSameRack = true;
            }
            if (!onRack) {
                isNotOnSameRack = true;
            }
            if (isOnSameRack && isNotOnSameRack)
                break;
        }
        if (!isOnSameRack || !isNotOnSameRack)
            break;
    }
    assertTrue(isOnSameRack);
    assertTrue(isNotOnSameRack);
}

53. DFSUtil#locatedBlocks2Locations()

Project: hadoop-20
Source File: DFSUtil.java
View license
/**
   * Convert a LocatedBlocks to a BlockLocation[]
   * @param blocks a LocatedBlocks
   * @return an array of BlockLocations
   */
public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
    if (blocks == null) {
        return new BlockLocation[0];
    }
    int nrBlocks = blocks.locatedBlockCount();
    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
    if (nrBlocks == 0) {
        return blkLocations;
    }
    int idx = 0;
    for (LocatedBlock blk : blocks.getLocatedBlocks()) {
        assert idx < nrBlocks : "Incorrect index";
        DatanodeInfo[] locations = blk.getLocations();
        String[] hosts = new String[locations.length];
        String[] names = new String[locations.length];
        String[] racks = new String[locations.length];
        for (int hCnt = 0; hCnt < locations.length; hCnt++) {
            hosts[hCnt] = locations[hCnt].getHostName();
            names[hCnt] = locations[hCnt].getName();
            NodeBase node = new NodeBase(names[hCnt], locations[hCnt].getNetworkLocation());
            racks[hCnt] = node.toString();
        }
        blkLocations[idx] = new BlockLocation(names, hosts, racks, blk.getStartOffset(), blk.getBlockSize(), blk.isCorrupt());
        idx++;
    }
    return blkLocations;
}
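Because this variant also propagates the corrupt flag from each LocatedBlock into the BlockLocation, callers can filter out corrupt blocks before using the host lists. A short hedged sketch of that filtering step; the class name is illustrative.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.BlockLocation;

public class HealthyBlockFilter {

    /** Returns only the block locations that are not flagged as corrupt. */
    public static List<BlockLocation> healthyBlocks(BlockLocation[] blocks) {
        List<BlockLocation> healthy = new ArrayList<BlockLocation>(blocks.length);
        for (BlockLocation block : blocks) {
            if (!block.isCorrupt()) {
                healthy.add(block);
            }
        }
        return healthy;
    }
}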

54. TestReplication#checkFile()

Project: hadoop-20
Source File: TestReplication.java
View license
/* check if at least two of the nodes are on the same rack */
private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = DFSClient.createNamenode(conf);
    waitForBlockReplication(name.toString(), namenode, Math.min(numDatanodes, repl), -1);
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0, Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L, Long.MAX_VALUE);
    // verify that rack locations match
    assertTrue(blockLocations.length == locations.locatedBlockCount());
    for (int i = 0; i < blockLocations.length; i++) {
        LocatedBlock blk = locations.get(i);
        DatanodeInfo[] datanodes = blk.getLocations();
        String[] topologyPaths = blockLocations[i].getTopologyPaths();
        String[] racks = blockLocations[i].getRacks();
        assertTrue(topologyPaths.length == datanodes.length);
        for (int j = 0; j < topologyPaths.length; j++) {
            boolean found = false;
            String matchedRack = null;
            for (int k = 0; k < racks.length; k++) {
                if (topologyPaths[j].startsWith(racks[k])) {
                    found = true;
                    matchedRack = racks[k];
                    break;
                }
            }
            assertTrue(found);
            assertEquals("Rack info should be equal", matchedRack, racks[j]);
        }
    }
    boolean isOnSameRack = true, isNotOnSameRack = true;
    for (LocatedBlock blk : locations.getLocatedBlocks()) {
        DatanodeInfo[] datanodes = blk.getLocations();
        if (datanodes.length <= 1)
            break;
        if (datanodes.length == 2) {
            isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(datanodes[1].getNetworkLocation()));
            break;
        }
        isOnSameRack = false;
        isNotOnSameRack = false;
        for (int i = 0; i < datanodes.length - 1; i++) {
            LOG.info("datanode " + i + ": " + datanodes[i].getName());
            boolean onRack = false;
            for (int j = i + 1; j < datanodes.length; j++) {
                if (datanodes[i].getNetworkLocation().equals(datanodes[j].getNetworkLocation())) {
                    onRack = true;
                }
            }
            if (onRack) {
                isOnSameRack = true;
            }
            if (!onRack) {
                isNotOnSameRack = true;
            }
            if (isOnSameRack && isNotOnSameRack)
                break;
        }
        if (!isOnSameRack || !isNotOnSameRack)
            break;
    }
    assertTrue(isOnSameRack);
    if (conf.getClass("dfs.block.replicator.classname", null, BlockPlacementPolicy.class).equals(BlockPlacementPolicyConfigurable.class) && repl == 2) {
        // For BlockPlacementPolicyConfigurable we do in rack replication for r =
        // 2.
        assertFalse(isNotOnSameRack);
    } else {
        assertTrue(isNotOnSameRack);
    }
}