org.apache.hadoop.fs.DF

Here are examples of the Java API class org.apache.hadoop.fs.DF, taken from open-source projects.

1. TaskTracker#getLogDiskFreeSpace()

Project: hadoop-20
Source File: TaskTracker.java
View license
/**
 * Returns the free space on the log disk. When no log directory is
 * configured, the disk is assumed usable and {@code Long.MAX_VALUE}
 * is returned.
 *
 * @return the available bytes on the log disk
 * @throws IOException if querying the filesystem fails
 */
long getLogDiskFreeSpace() throws IOException {
    final String logDirectory = fConf.getLogDir();
    if (logDirectory == null) {
        // No log disk specified: treat it as having unlimited space.
        return Long.MAX_VALUE;
    }
    // Reuse the cached DF probe for this directory, creating it on first use.
    DF probe = localDirsDf.get(logDirectory);
    if (probe == null) {
        probe = new DF(new File(logDirectory), fConf);
        localDirsDf.put(logDirectory, probe);
    }
    return probe.getAvailable();
}

2. TestNamenodeCapacityReport#testVolumeSizeWithBytes()

View license
/**
 * Verifies the reported volume size when a fixed byte count is reserved
 * via {@code dfs.datanode.du.reserved}.
 */
public void testVolumeSizeWithBytes() throws Exception {
    Configuration conf = new Configuration();
    File dataDir = MiniDFSCluster.getDataDirectory(conf);
    // DF cannot probe a non-existent directory, so ensure it exists first.
    dataDir.mkdirs();
    DF diskUsage = new DF(dataDir, conf);
    // Reserve an absolute number of bytes and check the resulting capacity.
    long reservedBytes = 10000;
    conf.setLong("dfs.datanode.du.reserved", reservedBytes);
    verifyVolumeSize(conf, reservedBytes, diskUsage);
}

3. TestNamenodeCapacityReport#testVolumeSizeWithPercent()

View license
/**
 * Verifies the reported volume size when a percentage of capacity is
 * reserved via {@code dfs.datanode.du.reserved.percent}.
 */
public void testVolumeSizeWithPercent() throws Exception {
    Configuration conf = new Configuration();
    File dataDir = MiniDFSCluster.getDataDirectory(conf);
    // DF cannot probe a non-existent directory, so ensure it exists first.
    dataDir.mkdirs();
    DF diskUsage = new DF(dataDir, conf);
    // Reserve 21.5% of capacity; compute the equivalent byte count to verify.
    long reservedBytes = (long) (diskUsage.getCapacity() * 0.215);
    conf.setFloat("dfs.datanode.du.reserved.percent", 21.5f);
    verifyVolumeSize(conf, reservedBytes, diskUsage);
}