org.apache.hadoop.fs.ChecksumFileSystem

Here are examples of the Java API class org.apache.hadoop.fs.ChecksumFileSystem, taken from open source projects. ChecksumFileSystem is an abstract FileSystem wrapper that keeps a hidden CRC sidecar file alongside each data file and verifies checksums on read; LocalFileSystem is its most common concrete subclass.
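
As a quick orientation before the project examples, here is a minimal, self-contained sketch of the core API (the class name and /tmp path are illustrative, not from any of the projects below): the local file system is itself a ChecksumFileSystem, getRawFileSystem() exposes the unchecksummed view underneath it, getChecksumFile() maps a data path to its hidden CRC sidecar, and setVerifyChecksum() toggles verification on read.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumFileSystemSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // FileSystem.getLocal returns a LocalFileSystem, a ChecksumFileSystem subclass.
        ChecksumFileSystem fs = FileSystem.getLocal(conf);
        // The raw, unchecksummed file system that backs the checksummed view.
        FileSystem raw = fs.getRawFileSystem();
        // The hidden sidecar holding CRCs for a data file (here ".data.txt.crc").
        Path crc = fs.getChecksumFile(new Path("/tmp/data.txt"));
        System.out.println("raw fs: " + raw.getUri() + ", crc file: " + crc);
        // Disable read-time verification when raw throughput matters more than safety.
        fs.setVerifyChecksum(false);
    }
}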

1. BenchmarkThroughput#run()

Project: hadoop-hdfs (identical copies of this method also appear in the hadoop-common and hadoop-20 projects, so it is listed once here)
File: BenchmarkThroughput.java
public int run(String[] args) throws IOException {
    // silence the minidfs cluster
    Log hadoopLog = LogFactory.getLog("org");
    if (hadoopLog instanceof Log4JLogger) {
        ((Log4JLogger) hadoopLog).getLogger().setLevel(Level.WARN);
    }
    int reps = 1;
    if (args.length == 1) {
        try {
            reps = Integer.parseInt(args[0]);
        } catch (NumberFormatException e) {
            printUsage();
            return -1;
        }
    } else if (args.length > 1) {
        printUsage();
        return -1;
    }
    Configuration conf = getConf();
    // the size of the file to write
    long SIZE = conf.getLong("dfsthroughput.file.size", 10L * 1024 * 1024 * 1024);
    BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);
    String localDir = conf.get("mapred.temp.dir");
    dir = new LocalDirAllocator("mapred.temp.dir");
    System.setProperty("test.build.data", localDir);
    System.out.println("Local = " + localDir);
    ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
    FileSystem rawLocal = checkedLocal.getRawFileSystem();
    for (int i = 0; i < reps; ++i) {
        writeAndReadLocalFile("local", conf, SIZE);
        writeAndReadFile(rawLocal, "raw", conf, SIZE);
        writeAndReadFile(checkedLocal, "checked", conf, SIZE);
    }
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster(conf, 1, true, new String[] { "/foo" });
        cluster.waitActive();
        FileSystem dfs = cluster.getFileSystem();
        for (int i = 0; i < reps; ++i) {
            writeAndReadFile(dfs, "dfs", conf, SIZE);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
            // clean up minidfs junk
            rawLocal.delete(new Path(localDir, "dfs"), true);
        }
    }
    return 0;
}
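
This benchmark times the same write-and-read cycle three ways: against a plain local file, against the raw (unchecksummed) file system obtained via getRawFileSystem(), and against the checksummed local file system, which isolates the cost of CRC generation and verification. A MiniDFSCluster pass is then added to give an HDFS baseline.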

2. TestFSInputChecker#testFSInputChecker()

Project: hadoop-20
File: TestFSInputChecker.java
public void testFSInputChecker() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong("dfs.block.size", BLOCK_SIZE);
    conf.setInt("io.bytes.per.checksum", BYTES_PER_SUM);
    conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
    rand.nextBytes(expected);
    // test DFS
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    ChecksumFileSystem fileSys = (ChecksumFileSystem) cluster.getFileSystem();
    try {
        testChecker(fileSys, true);
        testChecker(fileSys, false);
        testSeekAndRead(fileSys);
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
    // test Local FS
    fileSys = FileSystem.getLocal(conf);
    try {
        testChecker(fileSys, true);
        testChecker(fileSys, false);
        testFileCorruption((LocalFileSystem) fileSys);
        testSeekAndRead(fileSys);
    } finally {
        fileSys.close();
    }
}
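
The testFileCorruption call above exercises the checksum layer's main guarantee: silent corruption of the data file is caught on read. Here is a minimal standalone sketch of that pattern, not the test's actual code (the class name, path, and buffer size are illustrative): write through the checksummed view, rewrite the file with one byte flipped through the raw view so the CRC sidecar goes stale, then confirm that a read through the checksummed view fails with ChecksumException.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumCorruptionSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        ChecksumFileSystem fs = FileSystem.getLocal(conf);
        FileSystem raw = fs.getRawFileSystem();
        Path p = new Path("/tmp/checksum-corruption-demo");

        // Write through the checksummed view; this also writes the hidden .crc file.
        byte[] data = new byte[1024];
        Arrays.fill(data, (byte) 'x');
        FSDataOutputStream out = fs.create(p);
        out.write(data);
        out.close();

        // Rewrite the data file through the raw view with one byte flipped,
        // leaving the now-stale .crc sidecar in place.
        data[0] ^= 1;
        FSDataOutputStream corrupt = raw.create(p, true);
        corrupt.write(data);
        corrupt.close();

        // Reading through the checksummed view should now fail verification.
        FSDataInputStream in = fs.open(p);
        try {
            in.read(new byte[data.length]);
            System.out.println("unexpected: corruption not detected");
        } catch (ChecksumException e) {
            System.out.println("corruption detected: " + e.getMessage());
        } finally {
            in.close();
        }
    }
}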
