org.apache.hadoop.fs.FileSystem.Statistics

The following examples of the Java API class org.apache.hadoop.fs.FileSystem.Statistics are taken from open source projects.
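
Before diving into the examples, here is a minimal, self-contained sketch of the API itself. FileSystem.Statistics tracks per-scheme I/O counters (bytes read and written, read/write ops) for each FileSystem class used in the JVM. The path below is illustrative; any writable location works.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Path;

public class StatisticsDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("file:///tmp/statistics-demo.txt"); // illustrative path
        FileSystem fs = path.getFileSystem(conf);
        // Do some I/O so the counters have something to show.
        FSDataOutputStream out = fs.create(path, true);
        out.writeUTF("hello");
        out.close();
        // One Statistics object is kept per FileSystem class; getAllStatistics
        // returns them all, and getScheme() identifies each one.
        for (Statistics stats : FileSystem.getAllStatistics()) {
            System.out.println(stats.getScheme()
                + ": bytesRead=" + stats.getBytesRead()
                + ", bytesWritten=" + stats.getBytesWritten()
                + ", readOps=" + stats.getReadOps()
                + ", writeOps=" + stats.getWriteOps());
        }
    }
}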

1. Task#updateCounters()

Project: hadoop-20
Source File: Task.java
private synchronized void updateCounters() {
    for (Statistics stat : FileSystem.getAllStatistics()) {
        String uriScheme = stat.getScheme();
        FileSystemStatisticUpdater updater = statisticUpdaters.get(uriScheme);
        if (updater == null) {
            //new FileSystem has been found in the cache
            updater = new FileSystemStatisticUpdater(uriScheme, stat);
            statisticUpdaters.put(uriScheme, updater);
        }
        updater.updateCounters();
    }
    updateResourceCounters();
    updateCGResourceCounters();
    updateGCcounters();
}
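
The pattern above is worth noting: updaters are created lazily per URI scheme and cached in statisticUpdaters, so a filesystem first touched mid-task still gets its counters published on the next update. Below is a stripped-down sketch of the same delta-tracking idea, using only bytesRead and a hypothetical recordDelta sink in place of the MapReduce counter machinery:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;

class StatsPoller {
    // Last published bytesRead value per scheme.
    private final Map<String, Long> lastBytesRead = new HashMap<>();

    void poll() {
        for (Statistics stat : FileSystem.getAllStatistics()) {
            String scheme = stat.getScheme();
            long now = stat.getBytesRead();
            long prev = lastBytesRead.getOrDefault(scheme, 0L);
            if (now != prev) {
                recordDelta(scheme, now - prev); // hypothetical counter sink
                lastBytesRead.put(scheme, now);
            }
        }
    }

    private void recordDelta(String scheme, long delta) {
        System.out.println(scheme + ": +" + delta + " bytes read");
    }
}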

2. Utils#getFsStatistics()

Project: incubator-tez
Source File: Utils.java
/**
   * Gets the Statistics instances registered for the scheme associated
   * with path.
   *
   * @param path the path.
   * @param conf the configuration used to resolve the scheme if it is not
   *   part of the path.
   * @return a list of matching Statistics instances; empty if none are
   *   found for the scheme.
   */
@Private
public static List<Statistics> getFsStatistics(Path path, Configuration conf) throws IOException {
    List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
    path = path.getFileSystem(conf).makeQualified(path);
    String scheme = path.toUri().getScheme();
    for (Statistics stats : FileSystem.getAllStatistics()) {
        if (stats.getScheme().equals(scheme)) {
            matchedStats.add(stats);
        }
    }
    return matchedStats;
}
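
A typical call site, assuming the Utils class above is on the classpath (the HDFS URI is illustrative):

Configuration conf = new Configuration();
Path out = new Path("hdfs://namenode:8020/user/demo/output"); // illustrative URI
for (Statistics s : Utils.getFsStatistics(out, conf)) {
    System.out.println("hdfs bytes written: " + s.getBytesWritten());
}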

3. MRTask#getFsStatistics()

Project: incubator-tez
Source File: MRTask.java
/**
   * Gets the Statistics instances registered for the scheme associated
   * with path.
   *
   * @param path the path.
   * @param conf the configuration used to resolve the scheme if it is not
   *   part of the path.
   * @return a list of matching Statistics instances; empty if none are
   *   found for the scheme.
   */
@Private
public static List<Statistics> getFsStatistics(Path path, Configuration conf) throws IOException {
    List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
    path = path.getFileSystem(conf).makeQualified(path);
    String scheme = path.toUri().getScheme();
    for (Statistics stats : FileSystem.getAllStatistics()) {
        if (stats.getScheme().equals(scheme)) {
            matchedStats.add(stats);
        }
    }
    return matchedStats;
}

4. Utils#getFsStatistics()

Project: tez
Source File: Utils.java
/**
   * Gets the Statistics instances registered for the scheme associated
   * with path.
   *
   * @param path the path.
   * @param conf the configuration used to resolve the scheme if it is not
   *   part of the path.
   * @return a list of matching Statistics instances; empty if none are
   *   found for the scheme.
   */
@Private
public static List<Statistics> getFsStatistics(Path path, Configuration conf) throws IOException {
    List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
    path = path.getFileSystem(conf).makeQualified(path);
    String scheme = path.toUri().getScheme();
    for (Statistics stats : FileSystem.getAllStatistics()) {
        if (stats.getScheme().equals(scheme)) {
            matchedStats.add(stats);
        }
    }
    return matchedStats;
}

5. MRTask#getFsStatistics()

Project: tez
Source File: MRTask.java
/**
   * Gets the Statistics instances registered for the scheme associated
   * with path.
   *
   * @param path the path.
   * @param conf the configuration used to resolve the scheme if it is not
   *   part of the path.
   * @return a list of matching Statistics instances; empty if none are
   *   found for the scheme.
   */
@Private
public static List<Statistics> getFsStatistics(Path path, Configuration conf) throws IOException {
    List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
    path = path.getFileSystem(conf).makeQualified(path);
    String scheme = path.toUri().getScheme();
    for (Statistics stats : FileSystem.getAllStatistics()) {
        if (stats.getScheme().equals(scheme)) {
            matchedStats.add(stats);
        }
    }
    return matchedStats;
}

6. Task#updateCounters()

Project: hadoop-mapreduce
Source File: Task.java
private synchronized void updateCounters() {
    for (Statistics stat : FileSystem.getAllStatistics()) {
        String uriScheme = stat.getScheme();
        FileSystemStatisticUpdater updater = statisticUpdaters.get(uriScheme);
        if (updater == null) {
            //new FileSystem has been found in the cache
            updater = new FileSystemStatisticUpdater(uriScheme, stat);
            statisticUpdaters.put(uriScheme, updater);
        }
        updater.updateCounters();
    }
}

7. JobFilePartitioner#run()

Project: hraven
Source File: JobFilePartitioner.java
/**
   * Do the actual work.
   *
   * @see org.apache.hadoop.util.Tool#run(java.lang.String[])
   */
@Override
public int run(String[] args) throws Exception {
    myConf = getConf();
    // Presume these are all HDFS paths, even when accessed as file://
    hdfs = FileSystem.get(myConf);
    // Grab input args and allow for -Dxyz style arguments
    String[] otherArgs = new GenericOptionsParser(myConf, args).getRemainingArgs();
    // Grab the arguments we're looking for.
    CommandLine commandLine = parseArgs(otherArgs);
    // Grab the input path argument
    input = commandLine.getOptionValue("i");
    LOG.info("input=" + input);
    // Grab the output path argument
    String output = commandLine.getOptionValue("o");
    LOG.info("output=" + output);
    skipExisting = commandLine.hasOption("s");
    LOG.info("skipExisting=" + skipExisting);
    moveFiles = commandLine.hasOption("m");
    LOG.info("moveFiles=" + moveFiles);
    if (skipExisting && moveFiles) {
        throw new IllegalArgumentException("Cannot use both options skipExisting and move simultaneously.");
    }
    if (commandLine.hasOption("x")) {
        try {
            maXretention = Integer.parseInt(commandLine.getOptionValue("x"));
        } catch (NumberFormatException nfe) {
            throw new IllegalArgumentException("maXretention option -x is is not a valid number: " + commandLine.getOptionValue("x"), nfe);
        }
        // Additional check
        if (maXretention < 0) {
            throw new IllegalArgumentException("Cannot retain less than 0 files. Specified maXretention option -x is: " + commandLine.getOptionValue("x"));
        }
        LOG.info("maXretention=" + maXretention);
        if (moveFiles) {
            throw new IllegalArgumentException("Cannot use both options maXretention and move simultaneously.");
        }
    } else {
        maXretention = Integer.MAX_VALUE;
    }
    outputPath = new Path(output);
    FileStatus outputFileStatus = hdfs.getFileStatus(outputPath);
    if (!outputFileStatus.isDir()) {
        throw new IOException("Output is not a directory" + outputFileStatus.getPath().getName());
    }
    Path inputPath = new Path(input);
    URI inputURI = inputPath.toUri();
    String inputScheme = inputURI.getScheme();
    LOG.info("input scheme is: " + inputScheme);
    // HDFS
    if ((inputScheme == null) || (hdfs.getUri().getScheme().equals(inputScheme))) {
        processHDFSSources(inputPath);
    } else if (inputScheme.equals("file")) {
        if (moveFiles) {
            throw new IllegalArgumentException("Cannot move files that are not already in hdfs. Input is not HDFS: " + input);
        }
        processPlainFileSources(inputURI);
    } else {
        throw new IllegalArgumentException("Cannot process files from this URI scheme: " + inputScheme);
    }
    Statistics statistics = FileSystem.getStatistics(outputPath.toUri().getScheme(), hdfs.getClass());
    if (statistics != null) {
        LOG.info("HDFS bytes read: " + statistics.getBytesRead());
        LOG.info("HDFS bytes written: " + statistics.getBytesWritten());
        LOG.info("HDFS read ops: " + statistics.getReadOps());
        System.out.println("HDFS large read ops: " + statistics.getLargeReadOps());
        LOG.info("HDFS write ops: " + statistics.getWriteOps());
    }
    return 0;
}
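
Note the lookup near the end: FileSystem.getStatistics(String, Class) keys statistics by the concrete FileSystem subclass as well as the scheme, which is why the code passes hdfs.getClass(). A minimal standalone equivalent for the default filesystem:

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Statistics stats = FileSystem.getStatistics(fs.getUri().getScheme(), fs.getClass());
if (stats != null) {
    System.out.println("bytes read: " + stats.getBytesRead());
}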

8. TaskCounterUpdater#updateCounters()

Project: tez
Source File: TaskCounterUpdater.java
public void updateCounters() {
    // FileSystemStatistics are reset each time a new task is seen by the
    // container.
    // This doesn't remove the fileSystem, and does not clear all statistics -
    // so there is a potential of an unused FileSystem showing up for a
    // Container, and strange values for READ_OPS etc.
    Map<String, List<FileSystem.Statistics>> map = new HashMap<String, List<FileSystem.Statistics>>();
    for (Statistics stat : FileSystem.getAllStatistics()) {
        String uriScheme = stat.getScheme();
        if (map.containsKey(uriScheme)) {
            List<FileSystem.Statistics> list = map.get(uriScheme);
            list.add(stat);
        } else {
            List<FileSystem.Statistics> list = new ArrayList<FileSystem.Statistics>();
            list.add(stat);
            map.put(uriScheme, list);
        }
    }
    for (Map.Entry<String, List<FileSystem.Statistics>> entry : map.entrySet()) {
        FileSystemStatisticUpdater updater = statisticUpdaters.get(entry.getKey());
        if (updater == null) {
            //new FileSystem has been found in the cache
            updater = new FileSystemStatisticUpdater(tezCounters, entry.getValue(), entry.getKey());
            statisticUpdaters.put(entry.getKey(), updater);
        }
        updater.updateCounters();
    }
    gcUpdater.incrementGcCounter();
    updateResourceCounters();
}
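
Unlike example 1, which keeps one Statistics object per scheme, this version first groups all Statistics objects by scheme, since several FileSystem classes can serve the same scheme. On Java 8 and later the grouping loop can be collapsed with Map.computeIfAbsent; a behavior-equivalent sketch:

Map<String, List<FileSystem.Statistics>> map = new HashMap<>();
for (FileSystem.Statistics stat : FileSystem.getAllStatistics()) {
    map.computeIfAbsent(stat.getScheme(), k -> new ArrayList<>()).add(stat);
}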