org.apache.hadoop.fs.FileSystem.mkdirs()

Here are examples of the Java API org.apache.hadoop.fs.FileSystem.mkdirs(), taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

538 Examples
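
Before the individual project examples, here is a minimal, self-contained sketch of the two call shapes that recur below: the instance method fs.mkdirs(path) and the static helper FileSystem.mkdirs(fs, path, permission). The local filesystem and the /tmp paths are illustrative assumptions, not taken from any of the listed projects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsSketch {

    public static void main(String[] args) throws Exception {
        // Local filesystem keeps the sketch runnable anywhere; any FileSystem behaves the same way.
        FileSystem fs = FileSystem.getLocal(new Configuration());

        // Instance form: creates the directory and any missing parents, like 'mkdir -p'.
        // An already-existing directory hierarchy is not an error.
        Path dir = new Path("/tmp/mkdirs-sketch/a/b"); // hypothetical path
        boolean created = fs.mkdirs(dir);
        System.out.println("created = " + created);

        // Static helper form: mkdirs plus an explicit FsPermission applied to the new directory,
        // as used in the BaseTestHiveImpersonation examples below.
        FileSystem.mkdirs(fs, new Path("/tmp/mkdirs-sketch/with-perm"), new FsPermission((short) 0755));
    }
}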

19 Source : DrillFileSystem.java
with Apache License 2.0
from zpochen

@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
    return underlyingFs.mkdirs(f, permission);
}

19 Source : DrillFileSystem.java
with Apache License 2.0
from zpochen

@Override
public boolean mkdirs(Path folderPath) throws IOException {
    if (!underlyingFs.exists(folderPath)) {
        return underlyingFs.mkdirs(folderPath);
    } else if (!underlyingFs.getFileStatus(folderPath).isDir()) {
        throw new IOException("The specified folder path exists and is not a folder.");
    }
    return false;
}

19 Source : BaseTestHiveImpersonation.java
with Apache License 2.0
from zpochen

protected static void prepHiveConfAndData() throws Exception {
    hiveConf = new HiveConf();
    // Configure metastore persistence db location on local filesystem
    final String dbUrl = String.format("jdbc:derby:;databaseName=%s;create=true", getTempDir("metastore_db"));
    hiveConf.set(ConfVars.METASTORECONNECTURLKEY.varname, dbUrl);
    hiveConf.set(ConfVars.SCRATCHDIR.varname, "file:///" + getTempDir("scratch_dir"));
    hiveConf.set(ConfVars.LOCALSCRATCHDIR.varname, getTempDir("local_scratch_dir"));
    // Set MiniDFS conf in HiveConf
    hiveConf.set(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));
    whDir = hiveConf.get(ConfVars.METASTOREWAREHOUSE.varname);
    FileSystem.mkdirs(fs, new Path(whDir), new FsPermission((short) 0777));
    studentData = getPhysicalFileFromResource("student.txt");
    voterData = getPhysicalFileFromResource("voter.txt");
}

19 Source : HCFSJniFuseFileSystem.java
with Apache License 2.0
from opendataio

private int mkdirInternal(String path, long mode) {
    final Path uri = mPathResolverCache.getUnchecked(path);
    if (uri.getName().length() > MAX_NAME_LENGTH) {
        LOG.error("Failed to create directory {}: name longer than {} characters", path, MAX_NAME_LENGTH);
        return -ErrorCodes.ENAMETOOLONG();
    }
    try {
        mFileSystem.mkdirs(uri, new FsPermission((int) mode));
        setUserGroupIfNeeded(uri);
    } catch (Throwable e) {
        LOG.error("Failed to mkdir {}: ", path, e);
        return -ErrorCodes.EIO();
    }
    return 0;
}

19 Source : TestIntegration.java
with Apache License 2.0
from NJUJYB

private void mkdirs(String... entries) throws IOException {
    for (String entry : entries) {
        fs.mkdirs(new Path(entry));
    }
}

19 Source : TestHadoopArchives.java
with Apache License 2.0
from NJUJYB

@Test
public void testRelativePathWitRepl() throws Exception {
    final Path sub1 = new Path(inputPath, "dir1");
    fs.mkdirs(sub1);
    createFile(inputPath, fs, sub1.getName(), "a");
    final FsShell shell = new FsShell(conf);
    final List<String> originalPaths = lsr(shell, "input");
    System.out.println("originalPaths: " + originalPaths);
    // make the archive:
    final String fullHarPathStr = makeArchiveWithRepl();
    // compare results:
    final List<String> harPaths = lsr(shell, fullHarPathStr);
    Assert.assertEquals(originalPaths, harPaths);
}

19 Source : TestHadoopArchives.java
with Apache License 2.0
from NJUJYB

@Before
public void setUp() throws Exception {
    conf = new Configuration();
    conf.set(CapacitySchedulerConfiguration.PREFIX + CapacitySchedulerConfiguration.ROOT + "." + CapacitySchedulerConfiguration.QUEUES, "default");
    conf.set(CapacitySchedulerConfiguration.PREFIX + CapacitySchedulerConfiguration.ROOT + ".default." + CapacitySchedulerConfiguration.CAPACITY, "100");
    dfscluster = new MiniDFSCluster.Builder(conf).checkExitOnShutdown(true).numDataNodes(2).format(true).racks(null).build();
    fs = dfscluster.getFileSystem();
    // prepare archive path:
    archivePath = new Path(fs.getHomeDirectory(), "archive");
    fs.delete(archivePath, true);
    // prepare input path:
    inputPath = new Path(fs.getHomeDirectory(), inputDir);
    fs.delete(inputPath, true);
    fs.mkdirs(inputPath);
    // create basic input files:
    fileList.add(createFile(inputPath, fs, "a"));
    fileList.add(createFile(inputPath, fs, "b"));
    fileList.add(createFile(inputPath, fs, "c"));
}

19 Source : TestHadoopArchives.java
with Apache License 2.0
from NJUJYB

@Test
public void testPathWithSpaces() throws Exception {
    // create files/directories with spaces
    createFile(inputPath, fs, "c c");
    final Path sub1 = new Path(inputPath, "sub 1");
    fs.mkdirs(sub1);
    createFile(sub1, fs, "file x y z");
    createFile(sub1, fs, "file");
    createFile(sub1, fs, "x");
    createFile(sub1, fs, "y");
    createFile(sub1, fs, "z");
    final Path sub2 = new Path(inputPath, "sub 1 with suffix");
    fs.mkdirs(sub2);
    createFile(sub2, fs, "z");
    final FsShell shell = new FsShell(conf);
    final String inputPathStr = inputPath.toUri().getPath();
    final List<String> originalPaths = lsr(shell, inputPathStr);
    // make the archive:
    final String fullHarPathStr = makeArchive();
    // compare results
    final List<String> harPaths = lsr(shell, fullHarPathStr);
    Assert.assertEquals(originalPaths, harPaths);
}

19 Source : TestHadoopArchives.java
with Apache License 2.0
from NJUJYB

@Test
public void testRelativePath() throws Exception {
    final Path sub1 = new Path(inputPath, "dir1");
    fs.mkdirs(sub1);
    createFile(inputPath, fs, sub1.getName(), "a");
    final FsShell shell = new FsShell(conf);
    final List<String> originalPaths = lsr(shell, "input");
    System.out.println("originalPaths: " + originalPaths);
    // make the archive:
    final String fullHarPathStr = makeArchive();
    // compare results:
    final List<String> harPaths = lsr(shell, fullHarPathStr);
    Assert.assertEquals(originalPaths, harPaths);
}

19 Source : TestFileInputFormatPathFilter.java
with Apache License 2.0
from NJUJYB

public void setUp() throws Exception {
    tearDown();
    localFs.mkdirs(workDir);
}

19 Source : TestPermissionSymlinks.java
with Apache License 2.0
from NJUJYB

@Before
public void setUp() throws Exception {
    // Create initial test files
    fs.mkdirs(linkParent);
    fs.mkdirs(targetParent);
    DFSTestUtil.createFile(fs, target, 1024, (short) 3, 0xBEEFl);
    wrapper.createSymlink(target, link, false);
}

19 Source : TestListPathServlet.java
with Apache License 2.0
from NJUJYB

private void mkdirs(String dirName) throws IOException {
    filelist.add(hftpURI + dirName);
    fs.mkdirs(new Path(dirName));
}

19 Source : TestEditLogJournalFailures.java
with Apache License 2.0
from NJUJYB

/**
 * Do a mutative metadata operation on the file system.
 *
 * @return true if the operation was successful, false otherwise.
 */
private boolean doAnEdit() throws IOException {
    return fs.mkdirs(new Path("/tmp", Integer.toString(editsPerformed++)));
}

19 Source : TestStandbyCheckpoints.java
with Apache License 2.0
from NJUJYB

private void doEdits(int start, int stop) throws IOException {
    for (int i = start; i < stop; i++) {
        Path p = new Path("/test" + i);
        fs.mkdirs(p);
    }
}

19 Source : TestSecureNNWithQJM.java
with Apache License 2.0
from NJUJYB

/**
 * Tests use of QJM with the defined cluster.
 *
 * @throws IOException if there is an I/O error
 */
private void doNNWithQJMTest() throws IOException {
    startCluster();
    assertTrue(fs.mkdirs(TEST_PATH));
    // Restart the NN and make sure the edit was persisted
    // and loaded again
    restartNameNode();
    assertTrue(fs.exists(TEST_PATH));
    assertTrue(fs.mkdirs(TEST_PATH_2));
    // Restart the NN again and make sure both edits are persisted.
    restartNameNode();
    assertTrue(fs.exists(TEST_PATH));
    assertTrue(fs.exists(TEST_PATH_2));
}

19 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

@Test(expected = FileNotFoundException.class)
public void testResolvePathMissingThroughMountPoints2() throws IOException {
    fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/user/dirX"));
    fsView.resolvePath(new Path("/user/dirX/nonExisting"));
}

19 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

void initializeTargetTestRoot() throws IOException {
    targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
    // In case previous test was killed before cleanup
    fsTarget.delete(targetTestRoot, true);
    fsTarget.mkdirs(targetTestRoot);
}

19 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

@Before
public void setUp() throws Exception {
    initializeTargetTestRoot();
    // Make user and data dirs - we create links to them in the mount table
    fsTarget.mkdirs(new Path(targetTestRoot, "user"));
    fsTarget.mkdirs(new Path(targetTestRoot, "data"));
    fsTarget.mkdirs(new Path(targetTestRoot, "dir2"));
    fsTarget.mkdirs(new Path(targetTestRoot, "dir3"));
    FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot, "aFile"));
    // Now we use the mount fs to set links to user and dir
    // in the test root
    // Set up the defaultMT in the config with our mount point links
    conf = ViewFileSystemTestSetup.createConfig();
    setupMountPoints();
    fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}

19 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

@Test
public void testMkdirOfMountLink() throws IOException {
    // data exists - mkdirs returns true even though no permission in internal
    // mount table
    replacedert.replacedertTrue("mkdir of existing mount link should succeed", fsView.mkdirs(new Path("/data")));
}

19 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

@Test
public void testResolvePathThroughMountPoints() throws IOException {
    fileSystemTestHelper.createFile(fsView, "/user/foo");
    replacedert.replacedertEquals(new Path(targetTestRoot, "user/foo"), fsView.resolvePath(new Path("/user/foo")));
    fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/user/dirX"));
    replacedert.replacedertEquals(new Path(targetTestRoot, "user/dirX"), fsView.resolvePath(new Path("/user/dirX")));
    fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/user/dirX/dirY"));
    replacedert.replacedertEquals(new Path(targetTestRoot, "user/dirX/dirY"), fsView.resolvePath(new Path("/user/dirX/dirY")));
}

19 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

public void testInternalMkdirExisting2() throws IOException {
    replacedert.replacedertTrue("mkdir of existing dir should succeed", fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/internalDir/linkToDir2")));
}

19 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

@Test(expected = AccessControlException.class)
public void testInternalMkdirNew2() throws IOException {
    fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/internalDir/dirNew"));
}

19 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

public void testInternalMkdirExisting1() throws IOException {
    replacedert.replacedertTrue("mkdir of existing dir should succeed", fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/internalDir")));
}

19 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

// Mkdir for new internal mount table should fail
@Test(expected = AccessControlException.class)
public void testInternalMkdirNew() throws IOException {
    fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/dirNew"));
}

19 Source : ViewFileSystemBaseTest.java
with Apache License 2.0
from NJUJYB

/**
 * Test modify operations (create, mkdir, rename, etc)
 * on internal dirs of mount table
 * These operations should fail since the mount table is read-only or
 * because the internal dir that it is trying to create already
 * exists.
 */
// Mkdir on existing internal mount table succeeds except for /
@Test(expected = AccessControlException.class)
public void testInternalMkdirSlash() throws IOException {
    fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/"));
}

19 Source : TestChRootedFileSystem.java
with Apache License 2.0
from NJUJYB

@Test
public void testMkdirDelete() throws IOException {
    fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirX"));
    replacedert.replacedertTrue(fSys.isDirectory(new Path("/dirX")));
    replacedert.replacedertTrue(fSysTarget.isDirectory(new Path(chrootedTo, "dirX")));
    fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirX/dirY"));
    replacedert.replacedertTrue(fSys.isDirectory(new Path("/dirX/dirY")));
    replacedert.replacedertTrue(fSysTarget.isDirectory(new Path(chrootedTo, "dirX/dirY")));
    // Delete the created dir
    replacedert.replacedertTrue(fSys.delete(new Path("/dirX/dirY"), false));
    replacedert.replacedertFalse(fSys.exists(new Path("/dirX/dirY")));
    replacedert.replacedertFalse(fSysTarget.exists(new Path(chrootedTo, "dirX/dirY")));
    replacedert.replacedertTrue(fSys.delete(new Path("/dirX"), false));
    replacedert.replacedertFalse(fSys.exists(new Path("/dirX")));
    replacedert.replacedertFalse(fSysTarget.exists(new Path(chrootedTo, "dirX")));
}

19 Source : TestChRootedFileSystem.java
with Apache License 2.0
from NJUJYB

@Test
public void testRename() throws IOException {
    // Rename a file
    fileSystemTestHelper.createFile(fSys, "/newDir/foo");
    fSys.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
    replacedert.replacedertFalse(fSys.exists(new Path("/newDir/foo")));
    replacedert.replacedertFalse(fSysTarget.exists(new Path(chrootedTo, "newDir/foo")));
    replacedert.replacedertTrue(fSys.isFile(fileSystemTestHelper.getTestRootPath(fSys, "/newDir/fooBar")));
    replacedert.replacedertTrue(fSysTarget.isFile(new Path(chrootedTo, "newDir/fooBar")));
    // Rename a dir
    fSys.mkdirs(new Path("/newDir/dirFoo"));
    fSys.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
    replacedert.replacedertFalse(fSys.exists(new Path("/newDir/dirFoo")));
    replacedert.replacedertFalse(fSysTarget.exists(new Path(chrootedTo, "newDir/dirFoo")));
    replacedert.replacedertTrue(fSys.isDirectory(fileSystemTestHelper.getTestRootPath(fSys, "/newDir/dirFooBar")));
    replacedert.replacedertTrue(fSysTarget.isDirectory(new Path(chrootedTo, "newDir/dirFooBar")));
}

19 Source : TestChRootedFileSystem.java
with Apache License 2.0
from NJUJYB

/**
 * We would have liked renames across file systems to fail, but
 * unfortunately there is no way to distinguish the two file systems.
 * @throws IOException
 */
@Test
public void testRenameAcrossFs() throws IOException {
    fSys.mkdirs(new Path("/newDir/dirFoo"));
    fSys.rename(new Path("/newDir/dirFoo"), new Path("file:///tmp/dirFooBar"));
    FileSystemTestHelper.isDir(fSys, new Path("/tmp/dirFooBar"));
}

19 Source : TestChRootedFileSystem.java
with Apache License 2.0
from NJUJYB

@Test
public void testWorkingDirectory() throws Exception {
    // First we cd to our test root
    fSys.mkdirs(new Path("/testWd"));
    Path workDir = new Path("/testWd");
    fSys.setWorkingDirectory(workDir);
    Assert.assertEquals(workDir, fSys.getWorkingDirectory());
    fSys.setWorkingDirectory(new Path("."));
    Assert.assertEquals(workDir, fSys.getWorkingDirectory());
    fSys.setWorkingDirectory(new Path(".."));
    Assert.assertEquals(workDir.getParent(), fSys.getWorkingDirectory());
    // cd using a relative path
    // Go back to our test root
    workDir = new Path("/testWd");
    fSys.setWorkingDirectory(workDir);
    Assert.assertEquals(workDir, fSys.getWorkingDirectory());
    Path relativeDir = new Path("existingDir1");
    Path absoluteDir = new Path(workDir, "existingDir1");
    fSys.mkdirs(absoluteDir);
    fSys.setWorkingDirectory(relativeDir);
    Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
    // cd using a absolute path
    absoluteDir = new Path("/test/existingDir2");
    fSys.mkdirs(absoluteDir);
    fSys.setWorkingDirectory(absoluteDir);
    Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
    // Now open a file relative to the wd we just set above.
    Path absoluteFooPath = new Path(absoluteDir, "foo");
    fSys.create(absoluteFooPath).close();
    fSys.open(new Path("foo")).close();
    // Now mkdir relative to the dir we cd'ed to
    fSys.mkdirs(new Path("newDir"));
    replacedert.replacedertTrue(fSys.isDirectory(new Path(absoluteDir, "newDir")));
    /* Filesystem impls (RawLocal and DistributedFileSystem) do not check
     * for existence of the working dir
    absoluteDir = getTestRootPath(fSys, "nonexistingPath");
    try {
      fSys.setWorkingDirectory(absoluteDir);
      replacedert.fail("cd to non existing dir should have failed");
    } catch (Exception e) {
      // Exception as expected
    }
    */
    // Try a URI
    final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
    absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
    fSys.mkdirs(absoluteDir);
    fSys.setWorkingDirectory(absoluteDir);
    Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
}

19 Source : TestChRootedFileSystem.java
with Apache License 2.0
from NJUJYB

@Test
public void testGetContentSummary() throws IOException {
    // GetContentSummary of a dir
    fSys.mkdirs(new Path("/newDir/dirFoo"));
    ContentSummary cs = fSys.getContentSummary(new Path("/newDir/dirFoo"));
    Assert.assertEquals(-1L, cs.getQuota());
    Assert.assertEquals(-1L, cs.getSpaceQuota());
}

19 Source : AbstractFSContractTestBase.java
with Apache License 2.0
from NJUJYB

/**
 * Assert that a file exists and whose {@link FileStatus} entry
 * declares that this is a file and not a symlink or directory.
 *
 * @throws IOException IO problems during file operations
 */
protected void mkdirs(Path path) throws IOException {
    replacedertTrue("Failed to mkdir " + path, fileSystem.mkdirs(path));
}

19 Source : TestAzureFileSystemInstrumentation.java
with Apache License 2.0
from naver

@Test
public void testMetricsOnMkdirList() throws Exception {
    long base = getBaseWebResponses();
    // Create a directory
    replacedertTrue(fs.mkdirs(new Path("a")));
    // At the time of writing, it takes 1 request to create the actual directory,
    // plus 2 requests per level to check that there's no blob with that name and
    // 1 request per level above to create it if it doesn't exist.
    // So for the path above (/user/<name>/a), it takes 2 requests each to check
    // there's no blob called /user, no blob called /user/<name> and no blob
    // called /user/<name>/a, and then 3 request for the creation of the three
    // levels, and then 2 requests for checking/stamping the version of AS,
    // totaling 11.
    // Also, there's the initial 1 request for container check so total is 12.
    base = assertWebResponsesInRange(base, 1, 12);
    assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_DIRECTORIES_CREATED));
    // List the root contents
    assertEquals(1, fs.listStatus(new Path("/")).length);
    base = assertWebResponsesEquals(base, 1);
    assertNoErrors();
}

19 Source : TestHadoopArchives.java
with Apache License 2.0
from naver

@Test
public void testGlobFiles() throws Exception {
    final Path sub1 = new Path(inputPath, "dir1");
    final Path sub2 = new Path(inputPath, "dir2");
    fs.mkdirs(sub1);
    String fileName = "a";
    createFile(inputPath, fs, sub1.getName(), fileName);
    createFile(inputPath, fs, sub2.getName(), fileName);
    // not part of result
    createFile(inputPath, fs, sub1.getName(), "b");
    final String glob = "dir{1,2}/a";
    final FsShell shell = new FsShell(conf);
    final List<String> originalPaths = lsr(shell, inputPath.toString(), inputPath + "/" + glob);
    System.out.println("originalPaths: " + originalPaths);
    // make the archive:
    final String fullHarPathStr = makeArchive(inputPath, glob);
    // compare results:
    final List<String> harPaths = lsr(shell, fullHarPathStr, fullHarPathStr + "/" + glob);
    Assert.assertEquals(originalPaths, harPaths);
}

19 Source : TestHadoopArchives.java
with Apache License 2.0
from naver

@Test
public void testSingleFile() throws Exception {
    final Path sub1 = new Path(inputPath, "dir1");
    fs.mkdirs(sub1);
    String singleFileName = "a";
    createFile(inputPath, fs, sub1.getName(), singleFileName);
    final FsShell shell = new FsShell(conf);
    final List<String> originalPaths = lsr(shell, sub1.toString());
    System.out.println("originalPaths: " + originalPaths);
    // make the archive:
    final String fullHarPathStr = makeArchive(sub1, singleFileName);
    // compare results:
    final List<String> harPaths = lsr(shell, fullHarPathStr);
    Assert.assertEquals(originalPaths, harPaths);
}

19 Source : SimpleMover.java
with Apache License 2.0
from NationalSecurityAgency

Path checkParent(Path path) throws IOException {
    Path parent = path.getParent();
    if (directoryCache.getIfPresent(parent) == null && !fs.exists(parent)) {
        if (fs.mkdirs(parent)) {
            directoryCache.put(parent, parent);
        } else {
            log.warn("unable to create directory (" + parent + ")");
        }
    }
    return path;
}

19 Source : DrillFileSystem.java
with Apache License 2.0
from lealone

@SuppressWarnings("deprecation")
@Override
public boolean mkdirs(Path folderPath) throws IOException {
    if (!underlyingFs.exists(folderPath)) {
        return underlyingFs.mkdirs(folderPath);
    } else if (!underlyingFs.getFileStatus(folderPath).isDir()) {
        throw new IOException("The specified folder path exists and is not a folder.");
    }
    return false;
}

19 Source : Hdfs.java
with GNU Affero General Public License v3.0
from KnowageLabs

public boolean mkdirs(String path) {
    Path pathHdfs = new Path(path);
    try {
        if (!fs.exists(pathHdfs)) {
            fs.mkdirs(pathHdfs);
        }
    } catch (IOException e) {
        logger.error("Impossible to make dirs at path \"" + path + "\"" + e);
        return false;
    }
    return true;
}

19 Source : HdfsDB.java
with MIT License
from fengdis

/**
 * Create a directory.
 * @param dir
 * @throws Exception
 */
public void mkdir(String dir) throws Exception {
    if (!fs.exists(new Path(dir))) {
        fs.mkdirs(new Path(dir));
    }
}

19 Source : HRegionFileSystem.java
with Apache License 2.0
from fengchen8086

/**
 * Creates a directory. Assumes the user has already checked for this directory's existence.
 *
 * @param dir
 * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
 * whether the directory exists or not, and returns true if it exists.
 * @throws IOException
 */
boolean createDir(Path dir) throws IOException {
    int i = 0;
    IOException lastIOE = null;
    do {
        try {
            return fs.mkdirs(dir);
        } catch (IOException ioe) {
            lastIOE = ioe;
            // directory is present
            if (fs.exists(dir))
                return true;
            try {
                sleepBeforeRetry("Create Directory", i + 1);
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
        }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir", lastIOE);
}

19 Source : BaseTestHiveImpersonation.java
with Apache License 2.0
from dremio

protected static void prepHiveConfAndData() throws Exception {
    hiveConf = new HiveConf();
    // Configure metastore persistence db location on local filesystem
    final String dbUrl = String.format("jdbc:derby:;databaseName=%s;create=true", getTempDir("metastore_db"));
    // Set login timeout to 60 seconds
    DriverManager.setLoginTimeout(60);
    // Create the database for metastore in derby
    DriverManager.getConnection(dbUrl);
    hiveConf.set(ConfVars.METASTORECONNECTURLKEY.varname, dbUrl);
    hiveConf.set(ConfVars.SCRATCHDIR.varname, "file:///" + getTempDir("scratch_dir"));
    hiveConf.set(ConfVars.LOCALSCRATCHDIR.varname, getTempDir("local_scratch_dir"));
    hiveConf.set(ConfVars.METASTORE_SCHEMA_VERIFICATION.varname, "false");
    hiveConf.set(ConfVars.METASTORE_AUTO_CREATE_ALL.varname, "true");
    hiveConf.set(ConfVars.HIVE_CBO_ENABLED.varname, "false");
    // Set MiniDFS conf in HiveConf
    hiveConf.set(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));
    whDir = hiveConf.get(ConfVars.METASTOREWAREHOUSE.varname);
    FileSystem.mkdirs(fs, new Path(whDir), new FsPermission((short) 0777));
    studentData = getPhysicalFileFromResource("student.txt");
    voterData = getPhysicalFileFromResource("voter.txt");
}

19 Source : HadoopFileSystemWrapper.java
with Apache License 2.0
from dremio

@Override
public boolean mkdirs(Path folderPath) throws IOException {
    try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
        if (!underlyingFs.exists(folderPath)) {
            return underlyingFs.mkdirs(folderPath);
        } else if (!underlyingFs.getFileStatus(folderPath).isDirectory()) {
            throw new IOException("The specified folder path exists and is not a folder.");
        }
        return false;
    } catch (FSError e) {
        throw propagateFSError(e);
    }
}

19 Source : HadoopFileSystemWrapper.java
with Apache License 2.0
from dremio

@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
    try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
        return underlyingFs.mkdirs(f, permission);
    } catch (FSError e) {
        throw propagateFSError(e);
    }
}

19 Source : ProtoParquetWriterWithOffsetTest.java
with Apache License 2.0
from criteo

@Before
public void setup() throws IOException {
    final java.nio.file.Path tmpDir = Files.createTempDirectory("hdfs-reader-test-");
    rootPath = new Path(tmpDir.toString());
    finalPath = new Path(rootPath, "final");
    tmpPath = new Path(rootPath, "tmp");
    localFs = FileSystem.getLocal(new Configuration());
    localFs.mkdirs(rootPath);
    localFs.mkdirs(finalPath);
    localFs.mkdirs(tmpPath);
    PrometheusMetrics.clearCollectors();
}

19 Source : TestOzoneFileSystem.java
with Apache License 2.0
from apache

@Test
public void testGetTrashRoots() throws IOException {
    String username = UserGroupInformation.getCurrentUser().getShortUserName();
    Path trashRoot = new Path(OZONE_URI_DELIMITER, TRASH_PREFIX);
    Path userTrash = new Path(trashRoot, username);
    Collection<FileStatus> res = o3fs.getTrashRoots(false);
    Assert.assertEquals(0, res.size());
    fs.mkdirs(userTrash);
    res = o3fs.getTrashRoots(false);
    Assert.assertEquals(1, res.size());
    res.forEach(e -> Assert.assertEquals(userTrash.toString(), e.getPath().toUri().getPath()));
    // Only have one user trash for now
    res = o3fs.getTrashRoots(true);
    Assert.assertEquals(1, res.size());
    // Create a few more random user trash dir
    for (int i = 1; i <= 5; i++) {
        Path moreUserTrash = new Path(trashRoot, "trashuser" + i);
        fs.mkdirs(moreUserTrash);
    }
    // And create a file, which should be ignored
    fs.create(new Path(trashRoot, "trashuser99"));
    // allUsers = false should still return current user trash
    res = o3fs.getTrashRoots(false);
    Assert.assertEquals(1, res.size());
    res.forEach(e -> Assert.assertEquals(userTrash.toString(), e.getPath().toUri().getPath()));
    // allUsers = true should return all user trash
    res = o3fs.getTrashRoots(true);
    Assert.assertEquals(6, res.size());
}

19 Source : TestOzoneFileSystem.java
with Apache License 2.0
from apache

@Test
public void testCreateFileShouldCheckExistenceOfDirWithSameName() throws Exception {
    /*
     * Op 1. create file -> /d1/d2/d3/d4/key2
     * Op 2. create dir -> /d1/d2/d3/d4/key2
     *
     * Reverse of the above steps
     * Op 2. create dir -> /d1/d2/d3/d4/key3
     * Op 1. create file -> /d1/d2/d3/d4/key3
     *
     * Op 3. create file -> /d1/d2/d3 (d3 as a file inside /d1/d2)
     */
    Path parent = new Path("/d1/d2/d3/d4/");
    Path file1 = new Path(parent, "key1");
    try (FSDataOutputStream outputStream = fs.create(file1, false)) {
        replacedertNotNull("Should be able to create file", outputStream);
    }
    Path dir1 = new Path("/d1/d2/d3/d4/key2");
    fs.mkdirs(dir1);
    try (FSDataOutputStream outputStream1 = fs.create(dir1, false)) {
        fail("Should throw FileAlreadyExistsException");
    } catch (FileAlreadyExistsException fae) {
    // ignore as it's expected
    }
    Path file2 = new Path("/d1/d2/d3/d4/key3");
    try (FSDataOutputStream outputStream2 = fs.create(file2, false)) {
        replacedertNotNull("Should be able to create file", outputStream2);
    }
    try {
        fs.mkdirs(file2);
        fail("Should throw FileAlreadyExistsException");
    } catch (FileAlreadyExistsException fae) {
    // ignore as it's expected
    }
    // Op 3. create file -> /d1/d2/d3 (d3 as a file inside /d1/d2)
    Path file3 = new Path("/d1/d2/d3");
    try (FSDataOutputStream outputStream3 = fs.create(file3, false)) {
        fail("Should throw FileAlreadyExistsException");
    } catch (FileAlreadyExistsException fae) {
    // ignore as it's expected
    }
}

19 Source : HadoopUtils.java
with Apache License 2.0
from apache

/**
 * Init DolphinScheduler root path in HDFS.
 */
private void initHdfsPath() {
    Path path = new Path(resourceUploadPath);
    try {
        if (!fs.exists(path)) {
            fs.mkdirs(path);
        }
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
    }
}

19 Source : HadoopUtils.java
with Apache License 2.0
from apache

/**
 * make the given file and all non-existent parents into
 * directories. Has the semantics of Unix 'mkdir -p'.
 * Existence of the directory hierarchy is not an error.
 *
 * @param hdfsPath path to create
 * @return mkdir result
 * @throws IOException errors
 */
public boolean mkdir(String hdfsPath) throws IOException {
    return fs.mkdirs(new Path(hdfsPath));
}

19 Source : HiveTestUtil.java
with Apache License 2.0
from apache

private static HoodieCommitMetadata createPartition(String partitionPath, boolean isParquetSchemaSimple, boolean useSchemaFromCommitMetadata, String instantTime) throws IOException, URISyntaxException {
    HoodieCommitMetadata commitMetadata = new HoodieCommitMetadata();
    Path partPath = new Path(hiveSyncConfig.basePath + "/" + partitionPath);
    fileSystem.makeQualified(partPath);
    fileSystem.mkdirs(partPath);
    List<HoodieWriteStat> writeStats = createTestData(partPath, isParquetSchemaSimple, instantTime);
    writeStats.forEach(s -> commitMetadata.addWriteStat(partitionPath, s));
    addSchemaToCommitMetadata(commitMetadata, isParquetSchemaSimple, useSchemaFromCommitMetadata);
    return commitMetadata;
}

19 Source : HiveTestUtil.java
with Apache License 2.0
from apache

private static HoodieCommitMetadata createPartitions(int numberOfPartitions, boolean isParquetSchemaSimple, boolean useSchemaFromCommitMetadata, DateTime startFrom, String instantTime) throws IOException, URISyntaxException {
    startFrom = startFrom.withTimeAtStartOfDay();
    HoodieCommitMetadata commitMetadata = new HoodieCommitMetadata();
    for (int i = 0; i < numberOfPartitions; i++) {
        String partitionPath = dtfOut.print(startFrom);
        Path partPath = new Path(hiveSyncConfig.basePath + "/" + partitionPath);
        fileSystem.makeQualified(partPath);
        fileSystem.mkdirs(partPath);
        List<HoodieWriteStat> writeStats = createTestData(partPath, isParquetSchemaSimple, instantTime);
        startFrom = startFrom.minusDays(1);
        writeStats.forEach(s -> commitMetadata.addWriteStat(partitionPath, s));
    }
    addSchemaToCommitMetadata(commitMetadata, isParquetSchemaSimple, useSchemaFromCommitMetadata);
    return commitMetadata;
}

19 Source : TestHoodieCombineHiveInputFormat.java
with Apache License 2.0
from apache

@BeforeEach
public void setUp() throws IOException, InterruptedException {
    this.fs = MiniClusterUtil.fileSystem;
    jobConf = new JobConf();
    hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
    assertTrue(fs.mkdirs(new Path(tempDir.toAbsolutePath().toString())));
    HoodieTestUtils.init(MiniClusterUtil.configuration, tempDir.toAbsolutePath().toString(), HoodieTableType.MERGE_ON_READ);
}
