Here are examples of the Java API class org.apache.hadoop.fs.FSDataOutputStream, taken from open-source projects.
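All of the examples share the same basic pattern: obtain a FileSystem, call one of its create() or append() overloads to get an FSDataOutputStream, write, and close. As a quick orientation, here is a minimal sketch of that pattern; the path and contents are made up for illustration, and a reachable default file system is assumed. Since FSDataOutputStream is Closeable, try-with-resources can stand in for the explicit close() calls seen in the examples below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataOutputStreamSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);      // default file system from the config
    Path path = new Path("/tmp/example.txt");  // hypothetical path
    try (FSDataOutputStream out = fs.create(path, true /* overwrite */)) {
      out.writeBytes("hello\n");  // writes the low byte of each char
      out.hflush();               // make the bytes visible to readers before close
    }                             // try-with-resources closes the stream
  }
}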
1. TestHDFSIntegration#loadData()
Project: sentry
File: TestHDFSIntegration.java
private void loadData(Statement stmt) throws IOException, SQLException {
  FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path("/tmp/f1.txt"));
  f1.writeChars("m1d1_t1\n");
  f1.writeChars("m1d1_t2\n");
  f1.writeChars("m1d1_t3\n");
  f1.flush();
  f1.close();
  stmt.execute("load data inpath '/tmp/f1.txt' overwrite into table p1 partition (month=1, day=1)");
  FSDataOutputStream f2 = miniDFS.getFileSystem().create(new Path("/tmp/f2.txt"));
  f2.writeChars("m2d2_t4\n");
  f2.writeChars("m2d2_t5\n");
  f2.writeChars("m2d2_t6\n");
  f2.flush();
  f2.close();
  stmt.execute("load data inpath '/tmp/f2.txt' overwrite into table p1 partition (month=2, day=2)");
  ResultSet rs = stmt.executeQuery("select * from p1");
  List<String> vals = new ArrayList<String>();
  while (rs.next()) {
    vals.add(rs.getString(1));
  }
  Assert.assertEquals(6, vals.size());
  rs.close();
}
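A note on the writeChars() calls above, which recur throughout these examples: FSDataOutputStream inherits its write* methods from java.io.DataOutputStream, so writeChars() emits two big-endian bytes per character, writeBytes() keeps only the low byte of each character, and writeUTF() prefixes a two-byte length to modified UTF-8. A small sketch of the difference, assuming a stream named out:

out.writeBytes("ab");  // 2 bytes: 0x61 0x62
out.writeChars("ab");  // 4 bytes: 0x00 0x61 0x00 0x62
out.writeUTF("ab");    // 4 bytes: 0x00 0x02 0x61 0x62 (length, then modified UTF-8)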
2. TestHDFSIntegration#loadData()
Project: sentry
File: TestHDFSIntegration.java
private void loadData(Statement stmt) throws IOException, SQLException {
  FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path("/tmp/f1.txt"));
  f1.writeChars("m1d1_t1\n");
  f1.writeChars("m1d1_t2\n");
  f1.writeChars("m1d1_t3\n");
  f1.flush();
  f1.close();
  stmt.execute("load data inpath '/tmp/f1.txt' overwrite into table p1 partition (month=1, day=1)");
  FSDataOutputStream f2 = miniDFS.getFileSystem().create(new Path("/tmp/f2.txt"));
  f2.writeChars("m2d2_t4\n");
  f2.writeChars("m2d2_t5\n");
  f2.writeChars("m2d2_t6\n");
  f2.flush();
  f2.close();
  stmt.execute("load data inpath '/tmp/f2.txt' overwrite into table p1 partition (month=2, day=2)");
  ResultSet rs = stmt.executeQuery("select * from p1");
  List<String> vals = new ArrayList<String>();
  while (rs.next()) {
    vals.add(rs.getString(1));
  }
  Assert.assertEquals(6, vals.size());
  rs.close();
}
3. TestHDFSIntegration#loadData()
Project: incubator-sentry
File: TestHDFSIntegration.java
private void loadData(Statement stmt) throws IOException, SQLException {
  FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path("/tmp/f1.txt"));
  f1.writeChars("m1d1_t1\n");
  f1.writeChars("m1d1_t2\n");
  f1.writeChars("m1d1_t3\n");
  f1.flush();
  f1.close();
  stmt.execute("load data inpath '/tmp/f1.txt' overwrite into table p1 partition (month=1, day=1)");
  FSDataOutputStream f2 = miniDFS.getFileSystem().create(new Path("/tmp/f2.txt"));
  f2.writeChars("m2d2_t4\n");
  f2.writeChars("m2d2_t5\n");
  f2.writeChars("m2d2_t6\n");
  f2.flush();
  f2.close();
  stmt.execute("load data inpath '/tmp/f2.txt' overwrite into table p1 partition (month=2, day=2)");
  ResultSet rs = stmt.executeQuery("select * from p1");
  List<String> vals = new ArrayList<String>();
  while (rs.next()) {
    vals.add(rs.getString(1));
  }
  Assert.assertEquals(6, vals.size());
  rs.close();
}
4. TestCSVFileReader#createCSVFiles()
Project: kite
File: TestCSVFileReader.java
@BeforeClass
public static void createCSVFiles() throws IOException {
  localfs = LocalFileSystem.getInstance();
  csvFile = new Path("target/temp.csv");
  reorderedFile = new Path("target/reordered.csv");
  tsvFile = new Path("target/temp.tsv");
  validatorFile = new Path("target/validator.csv");
  FSDataOutputStream out = localfs.create(csvFile, true);
  out.writeBytes(CSV_CONTENT);
  out.close();
  out = localfs.create(reorderedFile, true);
  out.writeBytes(REORDERED_CSV_CONTENT);
  out.close();
  out = localfs.create(validatorFile, true);
  out.writeBytes(VALIDATOR_CSV_CONTENT);
  out.close();
  out = localfs.create(tsvFile, true);
  out.writeBytes(TSV_CONTENT);
  out.close();
}
5. TestFSOutputSummer#writeFile3()
Project: hadoop-hdfs
File: TestFSOutputSummer.java
/* create a file, write data in variable-sized chunks */
private void writeFile3(Path name) throws Exception {
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      NUM_OF_DATANODES, BLOCK_SIZE);
  stm.write(expected, 0, HALF_CHUNK_SIZE);
  stm.write(expected, HALF_CHUNK_SIZE, BYTES_PER_CHECKSUM + 2);
  stm.write(expected, HALF_CHUNK_SIZE + BYTES_PER_CHECKSUM + 2, 2);
  stm.write(expected, HALF_CHUNK_SIZE + BYTES_PER_CHECKSUM + 4, HALF_CHUNK_SIZE);
  stm.write(expected, BLOCK_SIZE + 4, BYTES_PER_CHECKSUM - 4);
  stm.write(expected, BLOCK_SIZE + BYTES_PER_CHECKSUM, FILE_SIZE - 3 * BYTES_PER_CHECKSUM);
  stm.close();
  checkFile(name);
  cleanupFile(name);
}
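This example, like many below, uses the five-argument create() overload, whose extra parameters control the client buffer size, the replication factor, and the block size. A sketch with the parameters spelled out (fs, path, and the values are illustrative):

int bufferSize = fs.getConf().getInt("io.file.buffer.size", 4096); // client-side buffer
short replication = 3;                                             // copies kept per block
long blockSize = 128L * 1024 * 1024;                               // block size on the file system
FSDataOutputStream out = fs.create(path, true /* overwrite */, bufferSize, replication, blockSize);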
6. TestFSOutputSummer#writeFile3()
Project: hadoop-common
File: TestFSOutputSummer.java
/* create a file, write data in variable-sized chunks */
private void writeFile3(Path name) throws Exception {
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      NUM_OF_DATANODES, BLOCK_SIZE);
  stm.write(expected, 0, HALF_CHUNK_SIZE);
  stm.write(expected, HALF_CHUNK_SIZE, BYTES_PER_CHECKSUM + 2);
  stm.write(expected, HALF_CHUNK_SIZE + BYTES_PER_CHECKSUM + 2, 2);
  stm.write(expected, HALF_CHUNK_SIZE + BYTES_PER_CHECKSUM + 4, HALF_CHUNK_SIZE);
  stm.write(expected, BLOCK_SIZE + 4, BYTES_PER_CHECKSUM - 4);
  stm.write(expected, BLOCK_SIZE + BYTES_PER_CHECKSUM, FILE_SIZE - 3 * BYTES_PER_CHECKSUM);
  stm.close();
  checkFile(name);
  cleanupFile(name);
}
7. TestFSOutputSummer#writeFile3()
Project: hadoop-20
File: TestFSOutputSummer.java
/* create a file, write data in variable-sized chunks */
private void writeFile3(Path name) throws Exception {
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      NUM_OF_DATANODES, BLOCK_SIZE);
  stm.write(expected, 0, HALF_CHUNK_SIZE);
  stm.write(expected, HALF_CHUNK_SIZE, BYTES_PER_CHECKSUM + 2);
  stm.write(expected, HALF_CHUNK_SIZE + BYTES_PER_CHECKSUM + 2, 2);
  stm.write(expected, HALF_CHUNK_SIZE + BYTES_PER_CHECKSUM + 4, HALF_CHUNK_SIZE);
  stm.write(expected, BLOCK_SIZE + 4, BYTES_PER_CHECKSUM - 4);
  stm.write(expected, BLOCK_SIZE + BYTES_PER_CHECKSUM, FILE_SIZE - 3 * BYTES_PER_CHECKSUM);
  stm.close();
  checkFile(name);
  cleanupFile(name);
}
8. TestHarFileSystem#setUp()
Project: hadoop-mapreduce
File: TestHarFileSystem.java
protected void setUp() throws Exception {
  super.setUp();
  dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null);
  fs = dfscluster.getFileSystem();
  mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
  inputPath = new Path(fs.getHomeDirectory(), "test");
  inputrelPath = new Path(fs.getHomeDirectory().toUri().getPath().substring(1), "test");
  filea = new Path(inputPath, "a");
  fileb = new Path(inputPath, "b");
  filec = new Path(inputPath, "c");
  archivePath = new Path(fs.getHomeDirectory(), "tmp");
  fs.mkdirs(inputPath);
  FSDataOutputStream out = fs.create(filea);
  out.write("a".getBytes());
  out.close();
  out = fs.create(fileb);
  out.write("b".getBytes());
  out.close();
  out = fs.create(filec);
  out.write("c".getBytes());
  out.close();
}
9. TestCSVFileReader#createCSVFiles()
Project: cdk
File: TestCSVFileReader.java
@BeforeClass
public static void createCSVFiles() throws IOException {
  localfs = FileSystem.getLocal(new Configuration());
  csvFile = new Path("target/temp.csv");
  tsvFile = new Path("target/temp.tsv");
  validatorFile = new Path("target/validator.csv");
  FSDataOutputStream out = localfs.create(csvFile, true);
  out.writeBytes(CSV_CONTENT);
  out.close();
  out = localfs.create(validatorFile, true);
  out.writeBytes(VALIDATOR_CSV_CONTENT);
  out.close();
  out = localfs.create(tsvFile, true);
  out.writeBytes(TSV_CONTENT);
  out.close();
}
10. TestHDFSIntegration#loadDataTwoCols()
Project: sentry
File: TestHDFSIntegration.java
private void loadDataTwoCols(Statement stmt) throws IOException, SQLException {
  FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path("/tmp/f2.txt"));
  f1.writeChars("m1d1_t1, m1d1_t2\n");
  f1.writeChars("m1d1_t2, m1d1_t2\n");
  f1.writeChars("m1d1_t3, m1d1_t2\n");
  f1.flush();
  f1.close();
  stmt.execute("load data inpath '/tmp/f2.txt' overwrite into table p1 partition (month=1, day=1)");
  ResultSet rs = stmt.executeQuery("select * from p1");
  List<String> vals = new ArrayList<String>();
  while (rs.next()) {
    vals.add(rs.getString(1));
  }
  Assert.assertEquals(3, vals.size());
  rs.close();
}
11. TestHDFSIntegration#loadDataTwoCols()
Project: sentry
File: TestHDFSIntegration.java
private void loadDataTwoCols(Statement stmt) throws IOException, SQLException {
  FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path("/tmp/f2.txt"));
  f1.writeChars("m1d1_t1, m1d1_t2\n");
  f1.writeChars("m1d1_t2, m1d1_t2\n");
  f1.writeChars("m1d1_t3, m1d1_t2\n");
  f1.flush();
  f1.close();
  stmt.execute("load data inpath '/tmp/f2.txt' overwrite into table p1 partition (month=1, day=1)");
  ResultSet rs = stmt.executeQuery("select * from p1");
  List<String> vals = new ArrayList<String>();
  while (rs.next()) {
    vals.add(rs.getString(1));
  }
  Assert.assertEquals(3, vals.size());
  rs.close();
}
12. TestHDFSIntegration#loadDataTwoCols()
Project: incubator-sentry
File: TestHDFSIntegration.java
private void loadDataTwoCols(Statement stmt) throws IOException, SQLException {
  FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path("/tmp/f2.txt"));
  f1.writeChars("m1d1_t1, m1d1_t2\n");
  f1.writeChars("m1d1_t2, m1d1_t2\n");
  f1.writeChars("m1d1_t3, m1d1_t2\n");
  f1.flush();
  f1.close();
  stmt.execute("load data inpath '/tmp/f2.txt' overwrite into table p1 partition (month=1, day=1)");
  ResultSet rs = stmt.executeQuery("select * from p1");
  List<String> vals = new ArrayList<String>();
  while (rs.next()) {
    vals.add(rs.getString(1));
  }
  Assert.assertEquals(3, vals.size());
  rs.close();
}
13. UtilsTest#testFirstNBytesSame()
Project: dfs-datastores
File: UtilsTest.java
public void testFirstNBytesSame() throws IOException {
  FileSystem fs = FileSystem.get(new Configuration());
  String path1 = getTmpPath(fs, "file1");
  String path2 = getTmpPath(fs, "file2");
  FSDataOutputStream os = fs.create(new Path(path1));
  os.write(new byte[] { 1, 2, 3, 4, 10, 11, 12 });
  os.close();
  os = fs.create(new Path(path2));
  os.write(new byte[] { 1, 2, 3, 4, 5, 6 });
  os.close();
  assertTrue(Utils.firstNBytesSame(fs, new Path(path1), fs, new Path(path2), 3));
  assertTrue(Utils.firstNBytesSame(fs, new Path(path1), fs, new Path(path2), 4));
  assertFalse(Utils.firstNBytesSame(fs, new Path(path1), fs, new Path(path2), 5));
  assertFalse(Utils.firstNBytesSame(fs, new Path(path1), fs, new Path(path2), 1000));
}
14. HdfsThreadLeakTest#testBasic()
Project: lucene-solr
File: HdfsThreadLeakTest.java
@Test
public void testBasic() throws IOException {
  String uri = HdfsTestUtil.getURI(dfsCluster);
  Path path = new Path(uri);
  Configuration conf = new Configuration();
  conf.setBoolean("fs.hdfs.impl.disable.cache", true);
  FileSystem fs = FileSystem.get(path.toUri(), conf);
  Path testFile = new Path(uri.toString() + "/testfile");
  FSDataOutputStream out = fs.create(testFile);
  out.write(5);
  out.hflush();
  out.close();
  ((DistributedFileSystem) fs).recoverLease(testFile);
  fs.close();
}
15. IgniteHadoopFileSystemAbstractSelfTest#testAppendIfFileIsAlreadyBeingOpenedToWrite()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testAppendIfFileIsAlreadyBeingOpenedToWrite() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  final Path file = new Path(fsHome, "someFile");
  FSDataOutputStream os = fs.create(file);
  os.close();
  FSDataOutputStream appendOs = fs.append(file);
  GridTestUtils.assertThrows(log, new Callable<Object>() {
    @Override public Object call() throws Exception {
      return fs.append(file);
    }
  }, IOException.class, null);
  appendOs.close();
}
16. HadoopIgfs20FileSystemAbstractSelfTest#testAppendIfFileIsAlreadyBeingOpenedToWrite()
Project: ignite
File: HadoopIgfs20FileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testAppendIfFileIsAlreadyBeingOpenedToWrite() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  final Path file = new Path(fsHome, "someFile");
  FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()));
  os.close();
  FSDataOutputStream appendOs = fs.create(file, EnumSet.of(CreateFlag.APPEND),
      Options.CreateOpts.perms(FsPermission.getDefault()));
  GridTestUtils.assertThrows(log, new Callable<Object>() {
    @Override public Object call() throws Exception {
      return fs.create(file, EnumSet.of(CreateFlag.APPEND),
          Options.CreateOpts.perms(FsPermission.getDefault()));
    }
  }, IOException.class, null);
  appendOs.close();
}
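Unlike the FileSystem API in the previous example, this test uses the newer FileContext-style create() that takes an EnumSet<CreateFlag>, where CreateFlag.APPEND replaces a separate append() call. A minimal sketch of that style, assuming a Configuration named conf is in scope and the names are illustrative:

FileContext fc = FileContext.getFileContext(conf);
FSDataOutputStream out = fc.create(path,
    EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
    Options.CreateOpts.perms(FsPermission.getDefault()));
out.close();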
17. TestLeaseRecovery3#createFile()
Project: hadoop-20
File: TestLeaseRecovery3.java
// try to re-open the file before closing the previous handle. This
// should fail but will trigger lease recovery.
private Path createFile(DistributedFileSystem dfs, int size)
    throws IOException, InterruptedException {
  // create a random file name
  String filestr = "/foo" + AppendTestUtil.nextInt();
  System.out.println("filestr=" + filestr);
  Path filepath = new Path(filestr);
  FSDataOutputStream stm = dfs.create(filepath, true, bufferSize, REPLICATION_NUM, BLOCK_SIZE);
  assertTrue(dfs.dfs.exists(filestr));
  // write random number of bytes into it.
  System.out.println("size=" + size);
  stm.write(buffer, 0, size);
  // sync file
  AppendTestUtil.LOG.info("sync");
  stm.sync();
  // write another piece of data to file. This piece of data
  // is not yet synced
  stm.write(buffer, 0, size);
  return filepath;
}
18. TestHftpFileSystem#testConcurrentRead()
Project: hadoop-20
File: TestHftpFileSystem.java
/**
 * Scenario: Read an under construction file using hftp.
 *
 * Expected: Hftp should be able to read the latest byte after the file
 * has been hdfsSynced (but not yet closed).
 *
 * @throws IOException
 */
public void testConcurrentRead() throws IOException {
  // Write a test file.
  FSDataOutputStream out = hdfs.create(TEST_FILE, true);
  out.writeBytes("123");
  // sync but not close
  out.sync();
  // Try read using hftp.
  FSDataInputStream in = hftpFs.open(TEST_FILE);
  assertEquals('1', in.read());
  assertEquals('2', in.read());
  assertEquals('3', in.read());
  in.close();
  // Try seek and read.
  in = hftpFs.open(TEST_FILE);
  in.seek(2);
  assertEquals('3', in.read());
  in.close();
  out.close();
}
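Several of the hadoop-20 examples call sync() to make written bytes visible without closing the stream; in later Hadoop versions sync() was deprecated in favor of the Syncable pair hflush()/hsync(). A sketch of the distinction, assuming a stream named out:

out.write(data);
out.flush();   // flushes client-side buffers only; no visibility guarantee
out.hflush();  // data reaches the datanodes and becomes visible to new readers
out.hsync();   // additionally asks the datanodes to sync the data to disk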
19. TestAvatarSetQuota#testAvatarSetQuota()
Project: hadoop-20
File: TestAvatarSetQuota.java
@Test
public void testAvatarSetQuota() throws Exception {
  String test = "/testAvatarSetQuota";
  DFSTestUtil util = new DFSTestUtil(test, 10, 10, 1024);
  util.createFiles(fs, test);
  FSDataOutputStream out = fs.create(new Path(test + "/abc"));
  byte[] buffer = new byte[10 * 1024];
  Random r = new Random();
  r.nextBytes(buffer);
  out.write(buffer);
  out.sync();
  ((DistributedFileSystem) fs).setQuota(new Path(test), 5, -1);
  out.close();
  cluster.getStandbyAvatar(0).avatar.quiesceStandby(-1);
}
20. TestHdfsSemantics#testDoubleCreateSemantics()
Project: apache-storm-test
File: TestHdfsSemantics.java
@Test
public void testDoubleCreateSemantics() throws Exception {
  // 1. create an already existing open file w/o override flag
  Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
  FSDataOutputStream os1 = fs.create(file1, false);
  try {
    // should fail
    fs.create(file1, false);
    Assert.assertTrue("Create did not throw an exception", false);
  } catch (RemoteException e) {
    Assert.assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass());
  }
  // 2. close file and retry creation
  os1.close();
  try {
    // should still fail
    fs.create(file1, false);
  } catch (FileAlreadyExistsException e) {
  }
  // 3. delete file and retry creation
  fs.delete(file1, false);
  // should pass
  FSDataOutputStream os2 = fs.create(file1, false);
  Assert.assertNotNull(os2);
  os2.close();
}
21. TestHdfsSemantics#testAppendSemantics()
Project: apache-storm-test
File: TestHdfsSemantics.java
@Test
public void testAppendSemantics() throws Exception {
  // 1. try to append to an open file
  Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
  FSDataOutputStream os1 = fs.create(file1, false);
  try {
    // should fail
    fs.append(file1);
    Assert.assertTrue("Append did not throw an exception", false);
  } catch (RemoteException e) {
    Assert.assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass());
  }
  // 2. try to append to a closed file
  os1.close();
  // should pass
  FSDataOutputStream os2 = fs.append(file1);
  os2.close();
}
22. TestRaidDfs#createTestFilePartialLastBlock()
Project: hadoop-20
File: TestRaidDfs.java
//
// Creates a file with partially full last block. Populate it with random
// data. Returns its crc.
//
public static long createTestFilePartialLastBlock(FileSystem fileSys, Path name,
    int repl, int numBlocks, long blocksize) throws IOException {
  CRC32 crc = new CRC32();
  Random rand = new Random();
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, blocksize);
  // Write whole blocks.
  byte[] b = new byte[(int) blocksize];
  for (int i = 1; i < numBlocks; i++) {
    rand.nextBytes(b);
    stm.write(b);
    crc.update(b);
  }
  // Write partial block.
  b = new byte[(int) blocksize / 2 - 1];
  rand.nextBytes(b);
  stm.write(b);
  crc.update(b);
  stm.close();
  return crc.getValue();
}
23. TestHdfsProxy#createFile()
Project: hadoop-20
File: TestHdfsProxy.java
private static MyFile createFile(Path root, FileSystem fs, int levels) throws IOException {
  MyFile f = levels < 0 ? new MyFile() : new MyFile(levels);
  Path p = new Path(root, f.getName());
  FSDataOutputStream out = fs.create(p);
  byte[] toWrite = new byte[f.getSize()];
  new Random(f.getSeed()).nextBytes(toWrite);
  out.write(toWrite);
  out.close();
  FileSystem.LOG.info("created: " + p + ", size=" + f.getSize());
  return f;
}
24. TestDFSWrite#testDirectWrite()
Project: flume
File: TestDFSWrite.java
@Test
public void testDirectWrite() throws IOException {
  FlumeConfiguration conf = FlumeConfiguration.get();
  Path path = new Path("file:///tmp/testfile");
  FileSystem hdfs = path.getFileSystem(conf);
  hdfs.deleteOnExit(path);
  String STRING = "Hello World";
  // writing
  FSDataOutputStream dos = hdfs.create(path);
  dos.writeUTF(STRING);
  dos.close();
  // reading
  FSDataInputStream dis = hdfs.open(path);
  String s = dis.readUTF();
  System.out.println(s);
  assertEquals(STRING, s);
  dis.close();
  hdfs.close();
}
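As this example shows, Path.getFileSystem(conf) picks the FileSystem implementation from the path's scheme, so a file:// URI yields the local file system rather than HDFS. A compact sketch with illustrative URIs (the namenode address is hypothetical):

FileSystem local = new Path("file:///tmp/x").getFileSystem(conf);   // LocalFileSystem
FileSystem dfs = new Path("hdfs://nn:8020/x").getFileSystem(conf);  // DistributedFileSystem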
25. TestParquetScan#testSuccessFile()
Project: drill
File: TestParquetScan.java
@Test
public void testSuccessFile() throws Exception {
  Path p = new Path("/tmp/nation_test_parquet_scan");
  if (fs.exists(p)) {
    fs.delete(p, true);
  }
  fs.mkdirs(p);
  byte[] bytes = Resources.toByteArray(Resources.getResource("tpch/nation.parquet"));
  FSDataOutputStream os = fs.create(new Path(p, "nation.parquet"));
  os.write(bytes);
  os.close();
  fs.create(new Path(p, "_SUCCESS")).close();
  fs.create(new Path(p, "_logs")).close();
  testBuilder()
      .sqlQuery("select count(*) c from dfs.tmp.nation_test_parquet_scan where 1 = 1")
      .unOrdered()
      .baselineColumns("c")
      .baselineValues(25L)
      .build()
      .run();
}
26. Metadata#writeFile()
Project: drill
File: Metadata.java
/**
 * Serialize parquet metadata to json and write to a file
 *
 * @param parquetTableMetadata
 * @param p
 * @throws IOException
 */
private void writeFile(ParquetTableMetadata_v2 parquetTableMetadata, Path p) throws IOException {
  JsonFactory jsonFactory = new JsonFactory();
  jsonFactory.configure(Feature.AUTO_CLOSE_TARGET, false);
  jsonFactory.configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false);
  ObjectMapper mapper = new ObjectMapper(jsonFactory);
  SimpleModule module = new SimpleModule();
  module.addSerializer(ColumnMetadata_v2.class, new ColumnMetadata_v2.Serializer());
  mapper.registerModule(module);
  FSDataOutputStream os = fs.create(p);
  mapper.writerWithDefaultPrettyPrinter().writeValue(os, parquetTableMetadata);
  os.flush();
  os.close();
}
27. CubertMD#writeMetaFile()
Project: Cubert
File: CubertMD.java
private static void writeMetaFile(String metaFilePath,
    HashMap<String, String> metaFileKeyValues) throws IOException {
  Job tempjob = new Job();
  Configuration tempconf = tempjob.getConfiguration();
  FileSystem fs = FileSystem.get(tempconf);
  FSDataOutputStream outStream = fs.create(new Path(metaFilePath + "/.meta"));
  for (String key : metaFileKeyValues.keySet())
    outStream.write((key + " " + metaFileKeyValues.get(key) + "\n").getBytes());
  outStream.flush();
  outStream.close();
}
28. TestReplication#writeFile()
Project: hadoop-hdfs
File: TestReplication.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
29. TestModTime#writeFile()
Project: hadoop-hdfs
File: TestModTime.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
30. TestInjectionForSimulatedStorage#writeFile()
Project: hadoop-hdfs
File: TestInjectionForSimulatedStorage.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[filesize];
  for (int i = 0; i < buffer.length; i++) {
    buffer[i] = '1';
  }
  stm.write(buffer);
  stm.close();
}
31. TestFSOutputSummer#writeFile2()
Project: hadoop-hdfs
File: TestFSOutputSummer.java
/* create a file, write data chunk by chunk */
private void writeFile2(Path name) throws Exception {
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      NUM_OF_DATANODES, BLOCK_SIZE);
  int i = 0;
  for (; i < FILE_SIZE - BYTES_PER_CHECKSUM; i += BYTES_PER_CHECKSUM) {
    stm.write(expected, i, BYTES_PER_CHECKSUM);
  }
  stm.write(expected, i, FILE_SIZE - 3 * BYTES_PER_CHECKSUM);
  stm.close();
  checkFile(name);
  cleanupFile(name);
}
32. TestFileStatus#writeFile()
Project: hadoop-hdfs
File: TestFileStatus.java
private void writeFile(FileSystem fileSys, Path name, int repl, int fileSize, int blockSize)
    throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
33. TestDecommission#writeFile()
Project: hadoop-hdfs
File: TestDecommission.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
34. TestStickyBit#confirmCanAppend()
Project: hadoop-hdfs
File: TestStickyBit.java
/**
 * Ensure that even if a file is in a directory with the sticky bit on,
 * another user can write to that file (assuming correct permissions).
 */
private void confirmCanAppend(Configuration conf, FileSystem hdfs, Path baseDir) throws IOException {
  // Create a tmp directory with wide-open permissions and sticky bit
  Path p = new Path(baseDir, "tmp");
  hdfs.mkdirs(p);
  hdfs.setPermission(p, new FsPermission((short) 01777));
  // Write a file to the new tmp directory as a regular user
  hdfs = logonAs(user1, conf, hdfs);
  Path file = new Path(p, "foo");
  writeFile(hdfs, file);
  hdfs.setPermission(file, new FsPermission((short) 0777));
  // Log onto cluster as another user and attempt to append to file
  hdfs = logonAs(user2, conf, hdfs);
  Path file2 = new Path(p, "foo");
  FSDataOutputStream h = hdfs.append(file2);
  h.write("Some more data".getBytes());
  h.close();
}
35. TestHdfsProxy#createFile()
Project: hadoop-hdfs
File: TestHdfsProxy.java
private static MyFile createFile(Path root, FileSystem fs, int levels) throws IOException {
  MyFile f = levels < 0 ? new MyFile() : new MyFile(levels);
  Path p = new Path(root, f.getName());
  FSDataOutputStream out = fs.create(p);
  byte[] toWrite = new byte[f.getSize()];
  new Random(f.getSeed()).nextBytes(toWrite);
  out.write(toWrite);
  out.close();
  FileSystem.LOG.info("created: " + p + ", size=" + f.getSize());
  return f;
}
36. TestReplication#writeFile()
Project: hadoop-common
File: TestReplication.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
37. TestModTime#writeFile()
Project: hadoop-common
File: TestModTime.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
38. TestInjectionForSimulatedStorage#writeFile()
Project: hadoop-common
File: TestInjectionForSimulatedStorage.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[filesize];
  for (int i = 0; i < buffer.length; i++) {
    buffer[i] = '1';
  }
  stm.write(buffer);
  stm.close();
}
39. TestFSOutputSummer#writeFile2()
Project: hadoop-common
File: TestFSOutputSummer.java
/* create a file, write data chunk by chunk */
private void writeFile2(Path name) throws Exception {
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      NUM_OF_DATANODES, BLOCK_SIZE);
  int i = 0;
  for (; i < FILE_SIZE - BYTES_PER_CHECKSUM; i += BYTES_PER_CHECKSUM) {
    stm.write(expected, i, BYTES_PER_CHECKSUM);
  }
  stm.write(expected, i, FILE_SIZE - 3 * BYTES_PER_CHECKSUM);
  stm.close();
  checkFile(name);
  cleanupFile(name);
}
40. TestFileStatus#writeFile()
Project: hadoop-common
File: TestFileStatus.java
private void writeFile(FileSystem fileSys, Path name, int repl, int fileSize, int blockSize)
    throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
41. TestDecommission#writeFile()
Project: hadoop-common
File: TestDecommission.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
42. TestStickyBit#confirmCanAppend()
Project: hadoop-common
File: TestStickyBit.java
/**
 * Ensure that even if a file is in a directory with the sticky bit on,
 * another user can write to that file (assuming correct permissions).
 */
private void confirmCanAppend(Configuration conf, FileSystem hdfs, Path baseDir) throws IOException {
  // Create a tmp directory with wide-open permissions and sticky bit
  Path p = new Path(baseDir, "tmp");
  hdfs.mkdirs(p);
  hdfs.setPermission(p, new FsPermission((short) 01777));
  // Write a file to the new tmp directory as a regular user
  hdfs = logonAs(user1, conf, hdfs);
  Path file = new Path(p, "foo");
  writeFile(hdfs, file);
  hdfs.setPermission(file, new FsPermission((short) 0777));
  // Log onto cluster as another user and attempt to append to file
  hdfs = logonAs(user2, conf, hdfs);
  Path file2 = new Path(p, "foo");
  FSDataOutputStream h = hdfs.append(file2);
  h.write("Some more data".getBytes());
  h.close();
}
43. FileSystemContractBaseTest#testOverwrite()
Project: hadoop-common
File: FileSystemContractBaseTest.java
public void testOverwrite() throws IOException {
  Path path = path("/test/hadoop/file");
  fs.mkdirs(path.getParent());
  createFile(path);
  assertTrue("Exists", fs.exists(path));
  assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
  try {
    fs.create(path, false);
    fail("Should throw IOException.");
  } catch (IOException e) {
  }
  FSDataOutputStream out = fs.create(path, true);
  out.write(data, 0, data.length);
  out.close();
  assertTrue("Exists", fs.exists(path));
  assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
}
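The contract spelled out by this test applies to every example in this list: create(path, false) must fail when the path already exists, while create(path, true) truncates it. A compressed sketch of the two modes, assuming fs and p are in scope:

fs.create(p, true).close();    // overwrite: truncates p if it exists
try {
  fs.create(p, false).close(); // no overwrite: throws because p now exists
} catch (IOException expected) {
  // e.g. FileAlreadyExistsException on most implementations
}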
44. TestHdfsProxy#createFile()
Project: hadoop-common
File: TestHdfsProxy.java
private static MyFile createFile(Path root, FileSystem fs, int levels) throws IOException {
  MyFile f = levels < 0 ? new MyFile() : new MyFile(levels);
  Path p = new Path(root, f.getName());
  FSDataOutputStream out = fs.create(p);
  byte[] toWrite = new byte[f.getSize()];
  new Random(f.getSeed()).nextBytes(toWrite);
  out.write(toWrite);
  out.close();
  FileSystem.LOG.info("created: " + p + ", size=" + f.getSize());
  return f;
}
45. TestReplication#writeFile()
Project: hadoop-20
File: TestReplication.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
46. TestModTime#writeFile()
Project: hadoop-20
File: TestModTime.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
47. TestLeaseRecovery2#createFile()
Project: hadoop-20
File: TestLeaseRecovery2.java
// try to re-open the file before closing the previous handle. This
// should fail but will trigger lease recovery.
private Path createFile(DistributedFileSystem dfs, int size, boolean triggerSoftLease)
    throws IOException, InterruptedException {
  // create a random file name
  String filestr = "/foo" + AppendTestUtil.nextInt();
  System.out.println("filestr=" + filestr);
  Path filepath = new Path(filestr);
  FSDataOutputStream stm = dfs.create(filepath, true, bufferSize, REPLICATION_NUM, BLOCK_SIZE);
  assertTrue(dfs.dfs.exists(filestr));
  // write random number of bytes into it.
  System.out.println("size=" + size);
  stm.write(buffer, 0, size);
  // sync file
  AppendTestUtil.LOG.info("sync");
  stm.sync();
  if (triggerSoftLease) {
    AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
    dfs.dfs.leasechecker.interruptAndJoin();
  }
  return filepath;
}
48. TestInjectionForSimulatedStorage#writeFile()
Project: hadoop-20
File: TestInjectionForSimulatedStorage.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[filesize];
  for (int i = 0; i < buffer.length; i++) {
    buffer[i] = '1';
  }
  stm.write(buffer);
  stm.close();
}
49. TestHftpFileSystem#testIsUnderConstruction()
Project: hadoop-20
File: TestHftpFileSystem.java
/**
 * Tests isUnderConstruction() functionality.
 */
public void testIsUnderConstruction() throws Exception {
  // Open output file stream.
  FSDataOutputStream out = hdfs.create(TEST_FILE, true);
  out.writeBytes("test");
  // Test file under construction.
  FSDataInputStream in1 = hftpFs.open(TEST_FILE);
  assertTrue(in1.isUnderConstruction());
  in1.close();
  // Close output file stream.
  out.close();
  // Test file not under construction.
  FSDataInputStream in2 = hftpFs.open(TEST_FILE);
  assertFalse(in2.isUnderConstruction());
  in2.close();
}
50. TestFSOutputSummer#testWriteAndSync()
Project: hadoop-20
File: TestFSOutputSummer.java
@Test
public void testWriteAndSync() throws Exception {
  setUp(false);
  Path file = new Path("/testWriteAndSync");
  FSDataOutputStream out = fileSys.create(file);
  for (int i = 0; i < (BYTES_PER_CHECKSUM - 2); i++) {
    out.write(0);
  }
  out.sync();
  for (int i = 0; i < (BYTES_PER_CHECKSUM - 2); i++) {
    out.write(0);
  }
  byte buffer[] = new byte[1];
  out.write(buffer, 0, 1);
}
51. TestFSOutputSummer#writeFile2()
Project: hadoop-20
File: TestFSOutputSummer.java
/* create a file, write data chunk by chunk */
private void writeFile2(Path name) throws Exception {
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      NUM_OF_DATANODES, BLOCK_SIZE);
  int i = 0;
  for (; i < FILE_SIZE - BYTES_PER_CHECKSUM; i += BYTES_PER_CHECKSUM) {
    stm.write(expected, i, BYTES_PER_CHECKSUM);
  }
  stm.write(expected, i, FILE_SIZE - 3 * BYTES_PER_CHECKSUM);
  stm.close();
  checkFile(name);
  cleanupFile(name);
}
52. TestFileStatusExtended#testFileUnderConstruction()
Project: hadoop-20
File: TestFileStatusExtended.java
@Test
public void testFileUnderConstruction() throws Exception {
  String fileName = "/testFileUnderConstruction";
  FSDataOutputStream out = fs.create(new Path(fileName));
  byte[] buffer = new byte[BLOCK_SIZE * 5 + 256];
  random.nextBytes(buffer);
  out.write(buffer);
  out.sync();
  NameNode nn = cluster.getNameNode();
  List<FileStatusExtended> stats = nn.getRandomFilesSample(1);
  assertEquals(1, stats.size());
  assertEquals(((DistributedFileSystem) fs).getClient().clientName,
      stats.get(0).getHolder());
}
53. TestFileStatusCache#writeFile()
Project: hadoop-20
File: TestFileStatusCache.java
private void writeFile(FileSystem fileSys, Path name, int repl, int fileSize, int blockSize)
    throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
54. TestFileStatus#writeFile()
Project: hadoop-20
File: TestFileStatus.java
private void writeFile(FileSystem fileSys, Path name, int repl, int fileSize, int blockSize)
    throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
55. TestDecommission#writeFile()
Project: hadoop-20
File: TestDecommission.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
  LOG.info("Created file " + name + " with " + repl + " replicas.");
}
56. TestAccessTime#appendFile()
Project: hadoop-20
File: TestAccessTime.java
private void appendFile(FileSystem fileSys, Path name, int repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.append(name,
      fileSys.getConf().getInt("io.file.buffer.size", 4096));
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  sleep(1);
  stm.write(buffer);
  FileStatus stat = fileSys.getFileStatus(name);
  long atime1 = stat.getAccessTime();
  long mtime1 = stat.getModificationTime();
  System.out.println("Appending after write: accessTime = " + atime1 + " modTime = " + mtime1);
  sleep(1);
  stm.close();
  stat = fileSys.getFileStatus(name);
  atime1 = stat.getAccessTime();
  mtime1 = stat.getModificationTime();
  System.out.println("Appending after close: accessTime = " + atime1 + " modTime = " + mtime1);
}
57. TestAccessTime#createFile()
Project: hadoop-20
File: TestAccessTime.java
private void createFile(FileSystem fileSys, Path name, int repl, boolean overwrite)
    throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, overwrite,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  sleep(1);
  stm.write(buffer);
  FileStatus stat = fileSys.getFileStatus(name);
  long atime1 = stat.getAccessTime();
  long mtime1 = stat.getModificationTime();
  System.out.println("Creating after write: accessTime = " + atime1 + " modTime = " + mtime1);
  sleep(1);
  stm.close();
  stat = fileSys.getFileStatus(name);
  atime1 = stat.getAccessTime();
  mtime1 = stat.getModificationTime();
  System.out.println("Creating after close: accessTime = " + atime1 + " modTime = " + mtime1);
}
58. TestDatanodeFadvise#runFileFadvise()
Project: hadoop-20
File: TestDatanodeFadvise.java
private void runFileFadvise(int advise, int expectedFadvise) throws Exception {
  nFadvise.set(0);
  InjectionHandler.set(new FadviseHandler(advise));
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  WriteOptions options = new WriteOptions();
  options.setFadvise(advise);
  FSDataOutputStream out = fs.create(new Path("/test"), null, true, 1024,
      (short) 3, BLOCK_SIZE, 512, null, null, options);
  Random r = new Random();
  byte buffer[] = new byte[BLOCK_SIZE];
  r.nextBytes(buffer);
  out.write(buffer);
  out.close();
  assertTrue(NativeIO.isfadvisePossible());
  waitFadvise(expectedFadvise);
}
59. LocalReadWritePerf#writeFile()
Project: hadoop-20
File: LocalReadWritePerf.java
/**
 * Write a hdfs file with replication factor 1
 */
private void writeFile(Path filePath, long sizeKB) throws IOException {
  // Write a file with the specified amount of data
  FSDataOutputStream os = fs.create(filePath, true,
      getConf().getInt("io.file.buffer.size", 4096), (short) 1, blockSize);
  long fileSize = sizeKB * 1024;
  int bufSize = (int) Math.min(MAX_BUF_SIZE, fileSize);
  byte[] data = new byte[bufSize];
  long toWrite = fileSize;
  rand.nextBytes(data);
  while (toWrite > 0) {
    int len = (int) Math.min(toWrite, bufSize);
    os.write(data, 0, len);
    toWrite -= len;
  }
  os.sync();
  os.close();
}
60. BlockReaderTestUtil#writeFile()
Project: hadoop-20
File: BlockReaderTestUtil.java
/**
 * Create a file of the given size filled with random data.
 */
public byte[] writeFile(Path filepath, int sizeKB) throws IOException {
  FileSystem fs = cluster.getFileSystem();
  // Write a file with the specified amount of data
  FSDataOutputStream os = fs.create(filepath);
  byte data[] = new byte[1024 * sizeKB];
  new Random().nextBytes(data);
  os.write(data);
  os.close();
  return data;
}
61. FileSystemContractBaseTest#testOverwrite()
Project: hadoop-20
File: FileSystemContractBaseTest.java
public void testOverwrite() throws IOException {
  Path path = path("/test/hadoop/file");
  fs.mkdirs(path.getParent());
  createFile(path);
  assertTrue("Exists", fs.exists(path));
  assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
  try {
    fs.create(path, false);
    fail("Should throw IOException.");
  } catch (IOException e) {
  }
  FSDataOutputStream out = fs.create(path, true);
  out.write(data, 0, data.length);
  out.close();
  assertTrue("Exists", fs.exists(path));
  assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
}
62. TestHDFSIntegration#writeToPath()
Project: sentry
File: TestHDFSIntegration.java
private void writeToPath(String path, int numRows, String user, String group) throws IOException {
  Path p = new Path(path);
  miniDFS.getFileSystem().mkdirs(p);
  miniDFS.getFileSystem().setOwner(p, user, group);
  // miniDFS.getFileSystem().setPermission(p, FsPermission.valueOf("-rwxrwx---"));
  FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path(path + "/stuff.txt"));
  for (int i = 0; i < numRows; i++) {
    f1.writeChars("random" + i + "\n");
  }
  f1.flush();
  f1.close();
  miniDFS.getFileSystem().setOwner(new Path(path + "/stuff.txt"), "asuresh", "supergroup");
  miniDFS.getFileSystem().setPermission(new Path(path + "/stuff.txt"),
      FsPermission.valueOf("-rwxrwx---"));
}
63. TestHDFSIntegration#writeToPath()
Project: sentry
File: TestHDFSIntegration.java
private void writeToPath(String path, int numRows, String user, String group) throws IOException {
  Path p = new Path(path);
  miniDFS.getFileSystem().mkdirs(p);
  miniDFS.getFileSystem().setOwner(p, user, group);
  // miniDFS.getFileSystem().setPermission(p, FsPermission.valueOf("-rwxrwx---"));
  FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path(path + "/stuff.txt"));
  for (int i = 0; i < numRows; i++) {
    f1.writeChars("random" + i + "\n");
  }
  f1.flush();
  f1.close();
  miniDFS.getFileSystem().setOwner(new Path(path + "/stuff.txt"), "asuresh", "supergroup");
  miniDFS.getFileSystem().setPermission(new Path(path + "/stuff.txt"),
      FsPermission.valueOf("-rwxrwx---"));
}
64. ITHdfsOpsTest#TestPath()
Project: kylin
File: ITHdfsOpsTest.java
@Test
public void TestPath() throws IOException {
  String hdfsWorkingDirectory = KylinConfig.getInstanceFromEnv().getHdfsWorkingDirectory();
  Path coprocessorDir = new Path(hdfsWorkingDirectory, "test");
  fileSystem.mkdirs(coprocessorDir);
  Path newFile = new Path(coprocessorDir, "test_file");
  newFile = newFile.makeQualified(fileSystem.getUri(), null);
  FSDataOutputStream stream = fileSystem.create(newFile);
  stream.write(new byte[] { 0, 1, 2 });
  stream.close();
}
65. TestSignalManager#testConstraintsGetReadyTimestamp()
Project: kite
File: TestSignalManager.java
@Test
public void testConstraintsGetReadyTimestamp() throws IOException {
  SignalManager manager = new SignalManager(fileSystem, testDirectory);
  Constraints constraints = new Constraints(DatasetTestUtilities.USER_SCHEMA)
      .with("email", "testConstraintsReady@domain.com");
  Path signalFilePath = new Path(this.testDirectory, "email=testConstraintsReady%40domain.com");
  // drop a file at the signal path
  FSDataOutputStream stream = this.fileSystem.create(signalFilePath, true);
  stream.writeUTF(String.valueOf(System.currentTimeMillis()));
  stream.close();
  Assert.assertTrue(manager.getReadyTimestamp(constraints) != -1);
}
66. TestHDFSIntegration#writeToPath()
Project: incubator-sentry
File: TestHDFSIntegration.java
private void writeToPath(String path, int numRows, String user, String group) throws IOException {
  Path p = new Path(path);
  miniDFS.getFileSystem().mkdirs(p);
  miniDFS.getFileSystem().setOwner(p, user, group);
  // miniDFS.getFileSystem().setPermission(p, FsPermission.valueOf("-rwxrwx---"));
  FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path(path + "/stuff.txt"));
  for (int i = 0; i < numRows; i++) {
    f1.writeChars("random" + i + "\n");
  }
  f1.flush();
  f1.close();
  miniDFS.getFileSystem().setOwner(new Path(path + "/stuff.txt"), "asuresh", "supergroup");
  miniDFS.getFileSystem().setPermission(new Path(path + "/stuff.txt"),
      FsPermission.valueOf("-rwxrwx---"));
}
67. IgniteHadoopFileSystemAbstractSelfTest#testRenameDirectoryIfDstPathExists()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testRenameDirectoryIfDstPathExists() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path srcDir = new Path(fsHome, "/tmp/");
  Path dstDir = new Path(fsHome, "/tmpNew/");
  FSDataOutputStream os = fs.create(new Path(srcDir, "file1"));
  os.close();
  os = fs.create(new Path(dstDir, "file2"));
  os.close();
  assertTrue("Rename succeeded [srcDir=" + srcDir + ", dstDir=" + dstDir + ']',
      fs.rename(srcDir, dstDir));
  assertPathExists(fs, dstDir);
  assertPathExists(fs, new Path(fsHome, "/tmpNew/tmp"));
  assertPathExists(fs, new Path(fsHome, "/tmpNew/tmp/file1"));
}
68. IgniteHadoopFileSystemAbstractSelfTest#testRenameFileIfDstPathExists()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testRenameFileIfDstPathExists() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path srcFile = new Path(fsHome, "srcFile");
  Path dstFile = new Path(fsHome, "dstFile");
  FSDataOutputStream os = fs.create(srcFile);
  os.close();
  os = fs.create(dstFile);
  os.close();
  assertFalse(fs.rename(srcFile, dstFile));
  assertPathExists(fs, srcFile);
  assertPathExists(fs, dstFile);
}
69. IgniteHadoopFileSystemAbstractSelfTest#testCreateBase()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
@SuppressWarnings("deprecation")
public void testCreateBase() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
  Path file = new Path(dir, "someFile");
  assertPathDoesNotExist(fs, file);
  FsPermission fsPerm = new FsPermission((short) 644);
  FSDataOutputStream os = fs.create(file, fsPerm, false, 1, (short) 1, 1L, null);
  // Try to write something in file.
  os.write("abc".getBytes());
  os.close();
  // Check file status.
  FileStatus fileStatus = fs.getFileStatus(file);
  assertFalse(fileStatus.isDir());
  assertEquals(file, fileStatus.getPath());
  assertEquals(fsPerm, fileStatus.getPermission());
}
70. HadoopIgfs20FileSystemAbstractSelfTest#testCreateBase()
Project: ignite
File: HadoopIgfs20FileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testCreateBase() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
  Path file = new Path(dir, "someFile");
  assertPathDoesNotExist(fs, file);
  FsPermission fsPerm = new FsPermission((short) 644);
  FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(fsPerm));
  // Try to write something in file.
  os.write("abc".getBytes());
  os.close();
  // Check file status.
  FileStatus fileStatus = fs.getFileStatus(file);
  assertFalse(fileStatus.isDirectory());
  assertEquals(file, fileStatus.getPath());
  assertEquals(fsPerm, fileStatus.getPermission());
}
71. TestHFile#truncateFile()
Project: hindex
File: TestHFile.java
public static void truncateFile(FileSystem fs, Path src, Path dst) throws IOException {
  FileStatus fst = fs.getFileStatus(src);
  long len = fst.getLen();
  len = len / 2;
  // create a truncated hfile
  FSDataOutputStream fdos = fs.create(dst);
  byte[] buf = new byte[(int) len];
  FSDataInputStream fdis = fs.open(src);
  fdis.read(buf);
  fdos.write(buf);
  fdis.close();
  fdos.close();
}
72. TestFixedFileTrailer#writeTrailer()
Project: hindex
File: TestFixedFileTrailer.java
private void writeTrailer(Path trailerPath, FixedFileTrailer t, byte[] useBytesInstead)
    throws IOException {
  // Expect one non-null.
  assert (t == null) != (useBytesInstead == null);
  FSDataOutputStream fsdos = fs.create(trailerPath);
  // to make deserializer's job less trivial
  fsdos.write(135);
  if (useBytesInstead != null) {
    fsdos.write(useBytesInstead);
  } else {
    t.serialize(fsdos);
  }
  fsdos.close();
}
73. Dummy#create()
Project: HiBench
File: Dummy.java
public void create() throws IOException {
  log.info("Creating dummy file " + path + " with " + slots + " slots...");
  Utils.checkHdfsPath(path);
  FileSystem fs = path.getFileSystem(new Configuration());
  FSDataOutputStream out = fs.create(path);
  String contents = "";
  for (int i = 1; i <= slots; i++) {
    contents = contents.concat(Integer.toString(i) + "\n");
  }
  out.write(contents.getBytes("UTF-8"));
  out.close();
}
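One aside on the loop above: concatenating with String.concat() in a loop copies the whole string on every iteration, while a StringBuilder keeps the work linear. A hedged alternative body for the same loop, not part of the original project:

StringBuilder contents = new StringBuilder();
for (int i = 1; i <= slots; i++) {
  contents.append(i).append('\n');  // same "1\n2\n..." payload as the concat loop
}
out.write(contents.toString().getBytes("UTF-8"));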
74. TestCopyFiles#createFile()
Project: hadoop-mapreduce
File: TestCopyFiles.java
static MyFile createFile(Path root, FileSystem fs, int levels) throws IOException {
  MyFile f = levels < 0 ? new MyFile() : new MyFile(levels);
  Path p = new Path(root, f.getName());
  FSDataOutputStream out = fs.create(p);
  byte[] toWrite = new byte[f.getSize()];
  new Random(f.getSeed()).nextBytes(toWrite);
  out.write(toWrite);
  out.close();
  FileSystem.LOG.info("created: " + p + ", size=" + f.getSize());
  return f;
}
75. TestUtils#getStream()
Project: succinct
File: TestUtils.java
public static FSDataInputStream getStream(ByteBuffer buf) throws IOException {
  File tmpDir = Files.createTempDir();
  Path filePath = new Path(tmpDir.getAbsolutePath() + "/testOut");
  FileSystem fs = FileSystem.get(filePath.toUri(), new Configuration());
  FSDataOutputStream fOut = fs.create(filePath);
  buf.rewind();
  while (buf.hasRemaining()) {
    fOut.writeByte(buf.get());
  }
  fOut.close();
  buf.rewind();
  return fs.open(filePath);
}
76. TestUtils#getStream()
Project: succinct
File: TestUtils.java
public static FSDataInputStream getStream(IntBuffer buf) throws IOException {
  File tmpDir = Files.createTempDir();
  Path filePath = new Path(tmpDir.getAbsolutePath() + "/testOut");
  FileSystem fs = FileSystem.get(filePath.toUri(), new Configuration());
  FSDataOutputStream fOut = fs.create(filePath);
  buf.rewind();
  while (buf.hasRemaining()) {
    fOut.writeInt(buf.get());
  }
  fOut.close();
  buf.rewind();
  return fs.open(filePath);
}
77. TestUtils#getStream()
Project: succinct
File: TestUtils.java
public static FSDataInputStream getStream(ShortBuffer buf) throws IOException {
  File tmpDir = Files.createTempDir();
  Path filePath = new Path(tmpDir.getAbsolutePath() + "/testOut");
  FileSystem fs = FileSystem.get(filePath.toUri(), new Configuration());
  FSDataOutputStream fOut = fs.create(filePath);
  buf.rewind();
  while (buf.hasRemaining()) {
    fOut.writeShort(buf.get());
  }
  fOut.close();
  buf.rewind();
  return fs.open(filePath);
}
78. TestUtils#getStream()
Project: succinct
File: TestUtils.java
public static FSDataInputStream getStream(LongBuffer buf) throws IOException {
  File tmpDir = Files.createTempDir();
  Path filePath = new Path(tmpDir.getAbsolutePath() + "/testOut");
  FileSystem fs = FileSystem.get(filePath.toUri(), new Configuration());
  FSDataOutputStream fOut = fs.create(filePath);
  buf.rewind();
  while (buf.hasRemaining()) {
    fOut.writeLong(buf.get());
  }
  fOut.close();
  buf.rewind();
  return fs.open(filePath);
}
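The four getStream() overloads above drain a java.nio buffer one element at a time through the DataOutput methods. For the byte case, a bulk copy does the same job in one call; a hedged alternative to the per-byte loop, assuming the buffer fits in memory (as it does in these tests):

byte[] bytes = new byte[buf.remaining()];
buf.get(bytes);      // copy the buffer's remaining bytes into an array
fOut.write(bytes);   // single bulk write instead of a per-byte loop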
79. HCatalogTestUtils#createInputFile()
Project: sqoop
File: HCatalogTestUtils.java
private void createInputFile(Path path, int rowCount) throws IOException {
  if (fs.exists(path)) {
    fs.delete(path, true);
  }
  FSDataOutputStream os = fs.create(path);
  for (int i = 0; i < rowCount; i++) {
    String s = i + "\n";
    os.writeChars(s);
  }
  os.close();
}
80. ITMassInQueryTest#setup()
Project: kylin
File: ITMassInQueryTest.java
@Before
public void setup() throws Exception {
  ITKylinQueryTest.clean();
  ITKylinQueryTest.joinType = "left";
  ITKylinQueryTest.setupAll();
  Configuration hconf = HadoopUtil.getCurrentConfiguration();
  fileSystem = FileSystem.get(hconf);
  int sellerCount = 200;
  Random r = new Random();
  vipSellers = Sets.newHashSet();
  for (int i = 0; i < sellerCount; i++) {
    vipSellers.add(10000000L + r.nextInt(1500));
  }
  Path path = new Path("/tmp/vip_customers.txt");
  fileSystem.delete(path, false);
  FSDataOutputStream outputStream = fileSystem.create(path);
  org.apache.commons.io.IOUtils.write(StringUtils.join(vipSellers, "\n"), outputStream);
  outputStream.close();
  System.out.println("The filter is " + vipSellers);
}
81. TestCreateDatasetCommandCluster#testBasicUseLocalSchema()
Project: kite
File: TestCreateDatasetCommandCluster.java
@Test
public void testBasicUseLocalSchema() throws Exception {
  String avsc = "target/localUser.avsc";
  FSDataOutputStream out = getFS().create(new Path(avsc), true);
  ByteStreams.copy(Resources.getResource("test-schemas/user.avsc").openStream(), out);
  out.close();
  command.avroSchemaFile = avsc;
  command.datasets = Lists.newArrayList("users");
  command.run();
  DatasetDescriptor expectedDescriptor = new DatasetDescriptor.Builder()
      .schemaUri("resource:test-schemas/user.avsc")
      .build();
  verify(getMockRepo()).create("default", "users", expectedDescriptor);
  verify(console).debug(contains("Created"), eq("users"));
}
82. TestDatasetDescriptor#testSchemaFromHdfs()
Project: kite
File: TestDatasetDescriptor.java
@Test
public void testSchemaFromHdfs() throws IOException {
  MiniDFSTest.setupFS();
  FileSystem fs = MiniDFSTest.getDFS();
  // copy a schema to HDFS
  Path schemaPath = fs.makeQualified(new Path("schema.avsc"));
  FSDataOutputStream out = fs.create(schemaPath);
  IOUtils.copyBytes(DatasetTestUtilities.USER_SCHEMA_URL.toURL().openStream(), out, fs.getConf());
  out.close();
  // build a schema using the HDFS path and check it's the same
  Schema schema = new DatasetDescriptor.Builder().schemaUri(schemaPath.toUri()).build().getSchema();
  Assert.assertEquals(DatasetTestUtilities.USER_SCHEMA, schema);
  MiniDFSTest.teardownFS();
}
83. TestMRRJobsDAGApi#testVertexGroups()
Project: incubator-tez
File: TestMRRJobsDAGApi.java
@Test(timeout = 60000)
public void testVertexGroups() throws Exception {
  LOG.info("Running Group Test");
  Path inPath = new Path(TEST_ROOT_DIR, "in-groups");
  Path outPath = new Path(TEST_ROOT_DIR, "out-groups");
  FSDataOutputStream out = remoteFs.create(inPath);
  OutputStreamWriter writer = new OutputStreamWriter(out);
  writer.write("abcd ");
  writer.write("efgh ");
  writer.write("abcd ");
  writer.write("efgh ");
  writer.close();
  out.close();
  UnionExample job = new UnionExample();
  if (job.run(inPath.toString(), outPath.toString(), mrrTezCluster.getConfig())) {
    LOG.info("Success VertexGroups Test");
  } else {
    throw new TezUncheckedException("VertexGroups Test Failed");
  }
}
84. IgniteHadoopFileSystemAbstractSelfTest#testSetWorkingDirectory()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testSetWorkingDirectory() throws Exception {
  Path dir = new Path("/tmp/nested/dir");
  Path file = new Path("file");
  fs.mkdirs(dir);
  fs.setWorkingDirectory(dir);
  FSDataOutputStream os = fs.create(file);
  os.close();
  String filePath = fs.getFileStatus(new Path(dir, file)).getPath().toString();
  assertTrue(filePath.contains("/tmp/nested/dir/file"));
}
85. IgniteHadoopFileSystemAbstractSelfTest#testSetWorkingDirectoryIfPathIsNull()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testSetWorkingDirectoryIfPathIsNull() throws Exception {
    fs.setWorkingDirectory(null);
    Path file = new Path("file");
    FSDataOutputStream os = fs.create(file);
    os.close();
    String path = fs.getFileStatus(file).getPath().toString();
    assertTrue(path.endsWith("/user/" + getClientFsUser() + "/file"));
}
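Both working-directory tests rely on relative Path resolution: a Path with no leading slash resolves against the current working directory, and the second test shows Ignite's file system falling back to the user's home directory when the working directory is set to null. A small sketch of that resolution, with hypothetical names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        fs.setWorkingDirectory(new Path("/tmp/wd")); // hypothetical directory
        Path rel = new Path("data.txt");             // relative path
        // makeQualified resolves the relative path against the working directory
        System.out.println(fs.makeQualified(rel));   // ends with /tmp/wd/data.txt
    }
}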
86. IgniteHadoopFileSystemAbstractSelfTest#testRenameDirectory()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testRenameDirectory() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path dir = new Path(fsHome, "/tmp/");
    Path newDir = new Path(fsHome, "/tmpNew/");
    FSDataOutputStream os = fs.create(new Path(dir, "myFile"));
    os.close();
    assertTrue("Rename failed [dir=" + dir + ", newDir=" + newDir + ']', fs.rename(dir, newDir));
    assertPathDoesNotExist(fs, dir);
    assertPathExists(fs, newDir);
}
87. IgniteHadoopFileSystemAbstractSelfTest#testRenameFile()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testRenameFile() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path srcFile = new Path(fsHome, "/tmp/srcFile");
    Path dstFile = new Path(fsHome, "/tmp/dstFile");
    FSDataOutputStream os = fs.create(srcFile);
    os.close();
    assertTrue(fs.rename(srcFile, dstFile));
    assertPathDoesNotExist(fs, srcFile);
    assertPathExists(fs, dstFile);
}
88. IgniteHadoopFileSystemAbstractSelfTest#testAppendIfPathPointsToDirectory()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testAppendIfPathPointsToDirectory() throws Exception {
    final Path fsHome = new Path(primaryFsUri);
    final Path dir = new Path(fsHome, "/tmp");
    Path file = new Path(dir, "my");
    FSDataOutputStream os = fs.create(file);
    os.close();
    GridTestUtils.assertThrowsInherited(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            return fs.append(new Path(fsHome, dir), 1024);
        }
    }, IOException.class, null);
}
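The test above confirms that appending to a directory fails with an IOException. For contrast, appending to an existing regular file is the supported call; a minimal sketch, assuming a hypothetical pre-existing file and a FileSystem implementation that supports append (not all do):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendSketch {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/tmp/existing.txt"); // hypothetical existing file
        // append returns an FSDataOutputStream positioned at end-of-file
        try (FSDataOutputStream out = fs.append(file)) {
            out.writeBytes("appended line\n");
        }
    }
}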
89. IgniteHadoopFileSystemAbstractSelfTest#testOpenIfPathIsAlreadyOpened()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testOpenIfPathIsAlreadyOpened() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "someFile");
    FSDataOutputStream os = fs.create(file);
    os.close();
    FSDataInputStream is1 = fs.open(file);
    FSDataInputStream is2 = fs.open(file);
    is1.close();
    is2.close();
}
90. IgniteHadoopFileSystemAbstractSelfTest#testSetOwnerCheckNonRecursiveness()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testSetOwnerCheckNonRecursiveness() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file);
    os.close();
    Path tmpDir = new Path(fsHome, "/tmp");
    fs.setOwner(file, "fUser", "fGroup");
    fs.setOwner(tmpDir, "dUser", "dGroup");
    assertEquals("dUser", fs.getFileStatus(tmpDir).getOwner());
    assertEquals("dGroup", fs.getFileStatus(tmpDir).getGroup());
    assertEquals("fUser", fs.getFileStatus(file).getOwner());
    assertEquals("fGroup", fs.getFileStatus(file).getGroup());
}
91. IgniteHadoopFileSystemAbstractSelfTest#testSetOwnerIfOutputStreamIsNotClosed()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testSetOwnerIfOutputStreamIsNotClosed() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "myFile");
    FSDataOutputStream os = fs.create(file);
    fs.setOwner(file, "aUser", "aGroup");
    os.close();
    assertEquals("aUser", fs.getFileStatus(file).getOwner());
    assertEquals("aGroup", fs.getFileStatus(file).getGroup());
}
92. IgniteHadoopFileSystemAbstractSelfTest#testSetOwner()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testSetOwner() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    final Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file);
    os.close();
    assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());
    fs.setOwner(file, "aUser", "aGroup");
    assertEquals("aUser", fs.getFileStatus(file).getOwner());
    assertEquals("aGroup", fs.getFileStatus(file).getGroup());
}
93. IgniteHadoopFileSystemAbstractSelfTest#testSetOwnerCheckParametersGroupIsNull()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testSetOwnerCheckParametersGroupIsNull() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    final Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file);
    os.close();
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            fs.setOwner(file, "aUser", null);
            return null;
        }
    }, NullPointerException.class, "Ouch! Argument cannot be null: grpName");
}
94. IgniteHadoopFileSystemAbstractSelfTest#testSetOwnerCheckParametersUserIsNull()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testSetOwnerCheckParametersUserIsNull() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    final Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file);
    os.close();
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            fs.setOwner(file, null, "aGroup");
            return null;
        }
    }, NullPointerException.class, "Ouch! Argument cannot be null: username");
}
95. IgniteHadoopFileSystemAbstractSelfTest#testSetOwnerCheckParametersPathIsNull()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testSetOwnerCheckParametersPathIsNull() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    final Path file = new Path(fsHome, "/tmp/my");
    FSDataOutputStream os = fs.create(file);
    os.close();
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            fs.setOwner(null, "aUser", "aGroup");
            return null;
        }
    }, NullPointerException.class, "Ouch! Argument cannot be null: p");
}
96. IgniteHadoopFileSystemAbstractSelfTest#testSetPermissionIfOutputStreamIsNotClosed()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testSetPermissionIfOutputStreamIsNotClosed() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "myFile");
    FsPermission perm = new FsPermission((short) 123);
    FSDataOutputStream os = fs.create(file);
    fs.setPermission(file, perm);
    os.close();
    assertEquals(perm, fs.getFileStatus(file).getPermission());
}
97. IgniteHadoopFileSystemAbstractSelfTest#testSetPermission()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */ @SuppressWarnings("OctalInteger") public void testSetPermission() throws Exception { Path fsHome = new Path(primaryFsUri); Path file = new Path(fsHome, "/tmp/my"); FSDataOutputStream os = fs.create(file); os.close(); for (short i = 0; i <= 0777; i += 7) { FsPermission perm = new FsPermission(i); fs.setPermission(file, perm); assertEquals(perm, fs.getFileStatus(file).getPermission()); } }
98. IgniteHadoopFileSystemAbstractSelfTest#testSetPermissionCheckNonRecursiveness()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */ @SuppressWarnings("deprecation") public void testSetPermissionCheckNonRecursiveness() throws Exception { Path fsHome = new Path(primaryFsUri); Path file = new Path(fsHome, "/tmp/my"); FSDataOutputStream os = fs.create(file, FsPermission.getDefault(), false, 64 * 1024, fs.getDefaultReplication(), fs.getDefaultBlockSize(), null); os.close(); Path tmpDir = new Path(fsHome, "/tmp"); FsPermission perm = new FsPermission((short) 123); fs.setPermission(tmpDir, perm); assertEquals(perm, fs.getFileStatus(tmpDir).getPermission()); assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission()); }
99. IgniteHadoopFileSystemAbstractSelfTest#testSetPermissionCheckDefaultPermission()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */ @SuppressWarnings("deprecation") public void testSetPermissionCheckDefaultPermission() throws Exception { Path fsHome = new Path(primaryFsUri); Path file = new Path(fsHome, "/tmp/my"); FSDataOutputStream os = fs.create(file, FsPermission.getDefault(), false, 64 * 1024, fs.getDefaultReplication(), fs.getDefaultBlockSize(), null); os.close(); fs.setPermission(file, null); assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission()); assertEquals(FsPermission.getDefault(), fs.getFileStatus(file.getParent()).getPermission()); }
100. IgniteHadoopFileSystemAbstractSelfTest#testDeleteRecursivelyFromRoot()
Project: ignite
File: IgniteHadoopFileSystemAbstractSelfTest.java
/** @throws Exception If failed. */
public void testDeleteRecursivelyFromRoot() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
    FSDataOutputStream os = fs.create(someDir3);
    os.close();
    Path root = new Path(fsHome, "/");
    assertFalse(fs.delete(root, true));
    assertTrue(fs.delete(new Path("/someDir1"), true));
    assertPathDoesNotExist(fs, someDir3);
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
    assertPathExists(fs, root);
}