The following examples of the Java API class org.apache.hadoop.fs.FilterFileSystem are taken from open source projects.
1. TestFileSync#testFilterFileSystemForceSync()
Project: hadoop-20
File: TestFileSync.java
/**
 * Test using forceSync on a FilterFileSystem; make sure the sync happens on the disk.
 */
public void testFilterFileSystemForceSync() throws Exception {
  DistributedFileSystem fileSystem = (DistributedFileSystem) cluster.getFileSystem();
  FilterFileSystem filterFS = new FilterFileSystem(fileSystem);
  String filename = "/testFileForceSync";
  Path path = new Path(filename);
  boolean forceSync = true;
  DFSClient dfsClient = fileSystem.getClient();
  FSDataOutputStream out = filterFS.create(path, FsPermission.getDefault(), true,
      BUFFER_SIZE, REPLICATION_NUM, (long) BLOCK_SIZE, BYTES_PER_CHECKSUM,
      new Progressable() {
        @Override
        public void progress() {
        }
      }, forceSync);
  // Make sure the file is empty at the beginning.
  long fileSize = dfsClient.open(filename).getFileLength();
  assertEquals(0, fileSize);
  // Write SINGLE_BYTE bytes to the file.
  out.write(DFSTestUtil.generateSequentialBytes(0, SINGLE_BYTE));
  out.sync();
  // Make sure the data has been synced to the datanode disk.
  fileSize = dfsClient.open(filename).getFileLength();
  assertEquals(SINGLE_BYTE, fileSize);
  // Write BUFFER_SIZE bytes to the file.
  out.write(DFSTestUtil.generateSequentialBytes(0, BUFFER_SIZE));
  out.sync();
  // Make sure the data has been synced to the datanode disk.
  fileSize = dfsClient.open(filename).getFileLength();
  assertEquals(SINGLE_BYTE + BUFFER_SIZE, fileSize);
  // Write BLOCK_SIZE bytes to the file; this write crosses a block boundary.
  out.write(DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE));
  out.sync();
  // Make sure the data has been synced to the datanode disk.
  fileSize = dfsClient.open(filename).getFileLength();
  assertEquals(SINGLE_BYTE + BLOCK_SIZE + BUFFER_SIZE, fileSize);
  out.close();
}
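For context, here is a minimal sketch of the basic FilterFileSystem pattern that the test above relies on: the class wraps another FileSystem and forwards every call to it, so subclasses can override individual operations. The use of the local file system, the path, and the class name below are assumptions for illustration, not part of the project above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;

public class FilterFileSystemSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // FilterFileSystem forwards every call to the wrapped FileSystem
    // unless a subclass overrides it; here we wrap the local file system.
    FileSystem filterFs = new FilterFileSystem(FileSystem.getLocal(conf));

    Path path = new Path("/tmp/filterfs-demo.txt"); // hypothetical path
    FSDataOutputStream out = filterFs.create(path, true); // overwrite if present
    out.writeBytes("hello");
    out.close();

    // Both calls are delegated to the underlying local file system.
    System.out.println("exists: " + filterFs.exists(path)); // expected: true
    filterFs.delete(path, false);
  }
}

In the test above, the same delegation is what makes the forceSync flag meaningful: filterFS.create(...) passes the flag through to the wrapped DistributedFileSystem, and the assertions verify that each sync() is visible to an independent DFSClient reader.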