org.apache.accumulo.core.client.Connector

Here are examples of the Java API class org.apache.accumulo.core.client.Connector, taken from open source projects.
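
All of these snippets ultimately obtain a Connector from an Instance: a ZooKeeperInstance when talking to a real cluster, or a MockInstance in tests. Here is a minimal sketch of that pattern; the instance name, ZooKeeper hosts, and credentials are placeholders, not values from any of the projects below:

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class ConnectorSketch {
    public static void main(String[] args) throws Exception {
        // Real cluster: connect through ZooKeeper and authenticate with a PasswordToken.
        Instance instance = new ZooKeeperInstance("myInstance", "zkhost1:2181,zkhost2:2181");
        Connector conn = instance.getConnector("user", new PasswordToken("password"));
        System.out.println(conn.tableOperations().list());
        // Tests: MockInstance keeps data in memory and accepts any credentials.
        Connector mockConn = new MockInstance().getConnector("root", new PasswordToken(new byte[0]));
        System.out.println(mockConn.tableOperations().list());
    }
}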

1. TableUtils#ensureTableExists()

Project: Gaffer
Source File: TableUtils.java
/**
     * Ensures that the table exists; if it does not, creates it and sets it up
     * to receive Gaffer data
     *
     * @param store the accumulo store
     * @throws StoreException if a connection to accumulo could not be created or there is a failure to create a table/iterator
     */
public static void ensureTableExists(final AccumuloStore store) throws StoreException {
    final Connector conn;
    conn = store.getConnection();
    if (!conn.tableOperations().exists(store.getProperties().getTable())) {
        try {
            TableUtils.createTable(store);
        } catch (final TableExistsException e) {
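            // Ignored: another process created the table between the exists() check and createTable().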
        }
    }
}

2. TableUtils#getStoreConstructorInfo()

Project: Gaffer
Source File: TableUtils.java
/**
     * Returns the map containing all the information needed to create a new
     * instance of the accumulo gaffer.accumulostore
     * <p>
     *
     * @param properties the accumulo properties
     * @return A MapWritable containing all the required information to
     * construct an accumulo gaffer.accumulostore instance
     * @throws StoreException if a table could not be found or other table issues
     */
public static MapWritable getStoreConstructorInfo(final AccumuloProperties properties) throws StoreException {
    final Connector connection = getConnector(properties.getInstanceName(), properties.getZookeepers(), properties.getUserName(), properties.getPassword());
    BatchScanner scanner;
    try {
        scanner = connection.createBatchScanner(AccumuloStoreConstants.GAFFER_UTILS_TABLE, getCurrentAuthorizations(connection), properties.getThreadsForBatchScanner());
    } catch (final TableNotFoundException e) {
        throw new StoreException(e.getMessage(), e);
    }
    scanner.setRanges(Collections.singleton(getTableSetupRange(properties.getTable())));
    final Iterator<Entry<Key, Value>> iter = scanner.iterator();
    if (iter.hasNext()) {
        return getSchemasFromValue(iter.next().getValue());
    } else {
        return null;
    }
}

3. TableUtils#ensureUtilsTableExists()

Project: Gaffer
Source File: TableUtils.java
private static void ensureUtilsTableExists(final AccumuloStore store) throws StoreException {
    final Connector conn = store.getConnection();
    if (!conn.tableOperations().exists(AccumuloStoreConstants.GAFFER_UTILS_TABLE)) {
        try {
            conn.tableOperations().create(AccumuloStoreConstants.GAFFER_UTILS_TABLE);
        } catch (final TableExistsException e) {
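            // Ignored: the utils table was created concurrently.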
        } catch (final AccumuloException | AccumuloSecurityException e) {
            throw new StoreException("Failed to create : " + AccumuloStoreConstants.GAFFER_UTILS_TABLE + " table", e);
        }
    }
}

4. SpatialQueryExample#setupDataStores()

Project: geowave
Source File: SpatialQueryExample.java
private void setupDataStores() throws AccumuloSecurityException, AccumuloException {
    // Initialize VectorDataStore and AccumuloAdapterStore
    MockInstance instance = new MockInstance();
    // For the MockInstance we can use "user" - "password" as our
    // connection tokens
    Connector connector = instance.getConnector("user", new PasswordToken("password"));
    BasicAccumuloOperations operations = new BasicAccumuloOperations(connector);
    dataStore = new AccumuloDataStore(operations);
    adapterStore = new AccumuloAdapterStore(operations);
}

5. AccumuloGeoTableTest#testGetGeoTables()

Project: mrgeo
Source File: AccumuloGeoTableTest.java
//@Ignore
@Test
@Category(UnitTest.class)
public void testGetGeoTables() throws Exception {
    ZooKeeperInstance zkinst = new ZooKeeperInstance(inst, zoo);
    PasswordToken pwTok = new PasswordToken(pw.getBytes());
    Connector conn = zkinst.getConnector(u, pwTok);
    Assert.assertNotNull(conn);
    PasswordToken token = new PasswordToken(pw.getBytes());
    //Authorizations auths = new Authorizations(authsStr.split(","));
    Authorizations auths = new Authorizations("A,B,C,D,ROLE_USER,U".split(","));
    System.out.println(auths.toString());
    Hashtable<String, String> ht = AccumuloUtils.getGeoTables(null, token, auths, conn);
    for (String k : ht.keySet()) {
        System.out.println(k + " => " + ht.get(k));
    }
}

6. AccumuloConnector#getMockConnector()

Project: mrgeo
Source File: AccumuloConnector.java
/**
 * For testing.
 *
 * @param instance
 * @param user
 * @param pass
 * @return
 */
public static Connector getMockConnector(String instance, String user, String pass) throws DataProviderException {
    Instance mock = new MockInstance(instance);
    Connector conn = null;
    try {
        conn = mock.getConnector(user, pass.getBytes());
    } catch (Exception e) {
        throw new DataProviderException("problem creating mock connector - " + e.getMessage());
    }
    return conn;
}

7. AccumuloRangeQueryTest#ingestGeometries()

Project: geowave
Source File: AccumuloRangeQueryTest.java
@Before
public void ingestGeometries() throws AccumuloException, AccumuloSecurityException, IOException {
    final MockInstance mockInstance = new MockInstance();
    final Connector mockConnector = mockInstance.getConnector("root", new PasswordToken(new byte[0]));
    mockDataStore = new AccumuloDataStore(new BasicAccumuloOperations(mockConnector));
    index = new SpatialDimensionalityTypeProvider().createPrimaryIndex();
    adapter = new TestGeometryAdapter();
    try (IndexWriter writer = mockDataStore.createWriter(adapter, index)) {
        writer.write(testdata);
    }
}

8. AccumuloConnector#getConnector()

Project: mrgeo
Source File: AccumuloConnector.java
public static Connector getConnector(Properties p) throws DataProviderException {
    String pw = p.getProperty(MrGeoAccumuloConstants.MRGEO_ACC_KEY_PASSWORD);
    String pwenc = p.getProperty(MrGeoAccumuloConstants.MRGEO_ACC_KEY_PWENCODED64);
    if (pwenc != null) {
        if (pwenc.toLowerCase().equals("true")) {
            try {
                pw = Base64Utils.decodeToString(pw);
            } catch (IOException | ClassNotFoundException e) {
                throw new DataProviderException("Error decoding values", e);
            }
        }
    }
    Connector conn = getConnector(p.getProperty(MrGeoAccumuloConstants.MRGEO_ACC_KEY_INSTANCE), p.getProperty(MrGeoAccumuloConstants.MRGEO_ACC_KEY_ZOOKEEPERS), p.getProperty(MrGeoAccumuloConstants.MRGEO_ACC_KEY_USER), pw);
    return conn;
}

9. ExternalIndexMain#getRyaSail()

private static Sail getRyaSail() throws AccumuloException, AccumuloSecurityException {
    Connector connector = ConfigUtils.getConnector(getConf());
    final RdfCloudTripleStore store = new RdfCloudTripleStore();
    AccumuloRyaDAO crdfdao = new AccumuloRyaDAO();
    crdfdao.setConnector(connector);
    AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration(getConf());
    conf.setTablePrefix(tablePrefix);
    crdfdao.setConf(conf);
    store.setRyaDAO(crdfdao);
    return store;
}

10. Backup#run()

Project: lumify
Source File: Backup.java
public void run(BackupOptions backupOptions) throws Exception {
    LOGGER.info("Begin backup");
    Connector conn = createAccumuloConnection(backupOptions);
    FileSystem fileSystem = getHdfsFileSystem(backupOptions);
    List<String> tablesToBackup = getTablesToBackup(conn, backupOptions.getTableNamePrefix());
    try {
        takeTablesOffline(conn, tablesToBackup);
        backupTables(conn, fileSystem, tablesToBackup, backupOptions.getHdfsBackupDirectory());
        saveTablesList(tablesToBackup, fileSystem, backupOptions.getHdfsBackupDirectory());
        backupSecuregraphHdfsOverflowDirectory(fileSystem, backupOptions.getSecuregraphHdfsOverflowDirectory(), backupOptions.getHdfsBackupDirectory());
    } finally {
        takeTableOnline(conn, tablesToBackup);
    }
    LOGGER.info("Backup complete");
}

11. TableOperations#compactTransient()

/**
   * Compact all transient regions that were registered using {@link TransientRegistry}
   */
public static void compactTransient(FluoConfiguration fluoConfig) throws Exception {
    Connector conn = getConnector(fluoConfig);
    try (FluoClient client = FluoFactory.newClient(fluoConfig)) {
        Configuration appConfig = client.getAppConfiguration();
        TransientRegistry transientRegistry = new TransientRegistry(appConfig);
        List<RowRange> ranges = transientRegistry.getTransientRanges();
        for (RowRange r : ranges) {
            long t1 = System.currentTimeMillis();
            conn.tableOperations().compact(fluoConfig.getAccumuloTable(), new Text(r.getStart().toArray()), new Text(r.getEnd().toArray()), true, true);
            long t2 = System.currentTimeMillis();
            logger.info("Compacted {} in {}ms", r, (t2 - t1));
        }
    }
}

12. ConfigUtils#createBatchScanner()

Project: incubator-rya
Source File: ConfigUtils.java
public static BatchScanner createBatchScanner(String tablename, Configuration conf) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Connector connector = ConfigUtils.getConnector(conf);
    Authorizations auths = ConfigUtils.getAuthorizations(conf);
    Integer numThreads = null;
    if (conf instanceof RdfCloudTripleStoreConfiguration)
        numThreads = ((RdfCloudTripleStoreConfiguration) conf).getNumThreads();
    else
        numThreads = conf.getInt(RdfCloudTripleStoreConfiguration.CONF_NUM_THREADS, 2);
    return connector.createBatchScanner(tablename, auths, numThreads);
}

13. ConfigUtils#createScanner()

Project: incubator-rya
Source File: ConfigUtils.java
public static Scanner createScanner(String tablename, Configuration conf) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Connector connector = ConfigUtils.getConnector(conf);
    Authorizations auths = ConfigUtils.getAuthorizations(conf);
    return connector.createScanner(tablename, auths);
}

14. ConfigUtils#createMultitableBatchWriter()

Project: incubator-rya
Source File: ConfigUtils.java
public static MultiTableBatchWriter createMultitableBatchWriter(Configuration conf) throws AccumuloException, AccumuloSecurityException {
    Long DEFAULT_MAX_MEMORY = getWriterMaxMemory(conf);
    Long DEFAULT_MAX_LATENCY = getWriterMaxLatency(conf);
    Integer DEFAULT_MAX_WRITE_THREADS = getWriterMaxWriteThreads(conf);
    Connector connector = ConfigUtils.getConnector(conf);
    return connector.createMultiTableBatchWriter(DEFAULT_MAX_MEMORY, DEFAULT_MAX_LATENCY, DEFAULT_MAX_WRITE_THREADS);
}

15. ConfigUtils#createDefaultBatchWriter()

Project: incubator-rya
Source File: ConfigUtils.java
public static BatchWriter createDefaultBatchWriter(String tablename, Configuration conf) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
    Long DEFAULT_MAX_MEMORY = getWriterMaxMemory(conf);
    Long DEFAULT_MAX_LATENCY = getWriterMaxLatency(conf);
    Integer DEFAULT_MAX_WRITE_THREADS = getWriterMaxWriteThreads(conf);
    Connector connector = ConfigUtils.getConnector(conf);
    return connector.createBatchWriter(tablename, DEFAULT_MAX_MEMORY, DEFAULT_MAX_LATENCY, DEFAULT_MAX_WRITE_THREADS);
}
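
The two ConfigUtils writer factories above use the long/int overloads of createMultiTableBatchWriter and createBatchWriter, which were deprecated in Accumulo 1.5 in favor of BatchWriterConfig (example 25 below uses the newer style). A minimal sketch of the replacement; the memory, latency, and thread values here are illustrative placeholders, not the defaults ConfigUtils reads from its Configuration:

import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableNotFoundException;

public class WriterSketch {
    public static BatchWriter createWriter(Connector connector, String tablename) throws TableNotFoundException {
        // BatchWriterConfig bundles what the deprecated overload took as three separate arguments.
        BatchWriterConfig config = new BatchWriterConfig();
        config.setMaxMemory(10000000L); // bytes to buffer before flushing
        config.setMaxLatency(60000L, TimeUnit.MILLISECONDS); // max time a mutation may sit in the buffer
        config.setMaxWriteThreads(4); // threads used to send mutations to tablet servers
        return connector.createBatchWriter(tablename, config);
    }
}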

16. IndexWritingToolTest#testIndexWrite()

@Test
public void testIndexWrite() {
    Connector accCon = null;
    Instance inst;
    String[] args = new String[7];
    args[0] = "src/test/resources/ResultsFile1.txt";
    args[1] = "src/test/resources/testQuery.txt";
    args[2] = "instance";
    args[3] = "mock";
    args[4] = "user";
    args[5] = "password";
    args[6] = "table";
    String query = null;
    try {
        query = FileUtils.readFileToString(new File(args[1]));
    } catch (IOException e1) {
        e1.printStackTrace();
    }
    try {
        inst = new MockInstance(args[2]);
        accCon = inst.getConnector(args[4], args[5].getBytes());
        if (accCon.tableOperations().exists(args[6])) {
            accCon.tableOperations().delete(args[6]);
        }
        accCon.tableOperations().create(args[6]);
    } catch (AccumuloException e) {
        e.printStackTrace();
    } catch (AccumuloSecurityException e) {
        e.printStackTrace();
    } catch (TableExistsException e) {
        e.printStackTrace();
    } catch (TableNotFoundException e) {
        e.printStackTrace();
    }
    int result = 5;
    try {
        result = ToolRunner.run(new IndexWritingTool(), args);
    } catch (Exception e) {
        e.printStackTrace();
    }
    Assert.assertEquals(0, result);
    Scanner scan = null;
    try {
        scan = accCon.createScanner("table", new Authorizations());
    } catch (TableNotFoundException e) {
        e.printStackTrace();
    }
    scan.setRange(new Range());
    int count = 0;
    for (Map.Entry<Key, Value> entry : scan) {
        String[] k = entry.getKey().getRow().toString().split("\u0000");
        String[] c = entry.getKey().getColumnFamily().toString().split("\u0000");
        if (count == 0) {
            Assert.assertEquals(k[0], "person10");
            Assert.assertEquals(k[1], "person8");
            Assert.assertEquals(k[2], "person9");
            Assert.assertEquals(c[0], "z");
            Assert.assertEquals(c[1], "x");
            Assert.assertEquals(c[2], "y");
        } else if (count == 2) {
            Assert.assertEquals(k[0], "person2");
            Assert.assertEquals(k[1], "person1");
            Assert.assertEquals(k[2], "person3");
            Assert.assertEquals(c[0], "y");
            Assert.assertEquals(c[1], "x");
            Assert.assertEquals(c[2], "z");
        } else if (count == 5) {
            Assert.assertEquals(k[0], "person3");
            Assert.assertEquals(k[1], "person2");
            Assert.assertEquals(k[2], "person4");
            Assert.assertEquals(c[0], "y");
            Assert.assertEquals(c[1], "x");
            Assert.assertEquals(c[2], "z");
        } else if (count == 9) {
            Assert.assertEquals(k[0], "person5");
            Assert.assertEquals(k[1], "person3");
            Assert.assertEquals(k[2], "person4");
            Assert.assertEquals(c[0], "z");
            Assert.assertEquals(c[1], "x");
            Assert.assertEquals(c[2], "y");
        } else if (count == 13) {
            Assert.assertEquals(k[0], "person6");
            Assert.assertEquals(k[1], "person5");
            Assert.assertEquals(k[2], "person4");
            Assert.assertEquals(c[0], "z");
            Assert.assertEquals(c[1], "y");
            Assert.assertEquals(c[2], "x");
        } else if (count == 17) {
            Assert.assertEquals(k[0], "person7");
            Assert.assertEquals(k[1], "person6");
            Assert.assertEquals(k[2], "person8");
            Assert.assertEquals(c[0], "y");
            Assert.assertEquals(c[1], "x");
            Assert.assertEquals(c[2], "z");
        } else if (count == 21) {
            Assert.assertEquals(k[0], "person9");
            Assert.assertEquals(k[1], "person7");
            Assert.assertEquals(k[2], "person8");
            Assert.assertEquals(c[0], "z");
            Assert.assertEquals(c[1], "x");
            Assert.assertEquals(c[2], "y");
        } else if (count == 24) {
            Assert.assertEquals(query, entry.getValue().toString());
            String[] varOrders = entry.getKey().getColumnQualifier().toString().split("\u0000");
            Assert.assertEquals(3, varOrders.length);
            Assert.assertEquals(varOrders[0], "z;y;x");
            Assert.assertEquals(varOrders[1], "y;x;z");
            Assert.assertEquals(varOrders[2], "z;x;y");
        } else {
            Assert.assertTrue(k[0].startsWith("person"));
            Assert.assertTrue(k[1].startsWith("person"));
            Assert.assertTrue(k[2].startsWith("person"));
        }
        count++;
    }
    Assert.assertEquals(25, count);
}

17. RyaDirectExample#createPCJ()

Project: incubator-rya
Source File: RyaDirectExample.java
private static void createPCJ(SailRepositoryConnection conn) throws RepositoryException, AccumuloException, AccumuloSecurityException, TableExistsException {
    String queryString1 = //
    "" + //
    "SELECT ?e ?c ?l ?o " + //
    "{" + //
    "  ?c a ?e . " + //
    "  ?e <http://www.w3.org/2000/01/rdf-schema#label> ?l . " + //
    "  ?e <uri:talksTo> ?o . " + //
    "}";
    String queryString2 = //
    "" + //
    "SELECT ?e ?c ?l ?o " + //
    "{" + //
    "  ?e a ?c . " + //
    "  ?e <http://www.w3.org/2000/01/rdf-schema#label> ?l . " + //
    "  ?e <uri:talksTo> ?o . " + //
    "}";
    URI obj, subclass, talksTo;
    URI person = new URIImpl("urn:people:alice");
    URI feature = new URIImpl("urn:feature");
    URI sub = new URIImpl("uri:entity");
    subclass = new URIImpl("uri:class");
    obj = new URIImpl("uri:obj");
    talksTo = new URIImpl("uri:talksTo");
    conn.add(person, RDF.TYPE, sub);
    conn.add(feature, RDF.TYPE, sub);
    conn.add(sub, RDF.TYPE, subclass);
    conn.add(sub, RDFS.LABEL, new LiteralImpl("label"));
    conn.add(sub, talksTo, obj);
    AccumuloIndexSet ais1 = null;
    AccumuloIndexSet ais2 = null;
    String tablename1 = RYA_TABLE_PREFIX + "INDEX_1";
    String tablename2 = RYA_TABLE_PREFIX + "INDEX_2";
    Connector accCon = new MockInstance(INSTANCE).getConnector("root", new PasswordToken("".getBytes()));
    accCon.tableOperations().create(tablename1);
    accCon.tableOperations().create(tablename2);
    try {
        ais1 = new AccumuloIndexSet(queryString1, conn, accCon, tablename1);
        ais2 = new AccumuloIndexSet(queryString2, conn, accCon, tablename2);
    } catch (MalformedQueryException | SailException | QueryEvaluationException | MutationsRejectedException | TableNotFoundException e) {
        log.error("Error creating Accumulo Index", e);
    }
}

18. PrecompJoinOptimizer#getAccIndices()

private static List<ExternalTupleSet> getAccIndices(Configuration conf) throws MalformedQueryException, SailException, QueryEvaluationException, TableNotFoundException, AccumuloException, AccumuloSecurityException {
    List<String> tables = null;
    if (conf instanceof RdfCloudTripleStoreConfiguration) {
        tables = ((RdfCloudTripleStoreConfiguration) conf).getPcjTables();
    }
    String tablePrefix = conf.get(RdfCloudTripleStoreConfiguration.CONF_TBL_PREFIX);
    Connector c = ConfigUtils.getConnector(conf);
    Map<String, String> indexTables = Maps.newLinkedHashMap();
    if (tables != null && !tables.isEmpty()) {
        for (String table : tables) {
            Scanner s = c.createScanner(table, new Authorizations());
            s.setRange(Range.exact(new Text("~SPARQL")));
            for (Entry<Key, Value> e : s) {
                indexTables.put(table, e.getValue().toString());
            }
        }
    } else {
        for (String table : c.tableOperations().list()) {
            if (table.startsWith(tablePrefix + "INDEX")) {
                Scanner s = c.createScanner(table, new Authorizations());
                s.setRange(Range.exact(new Text("~SPARQL")));
                for (Entry<Key, Value> e : s) {
                    indexTables.put(table, e.getValue().toString());
                }
            }
        }
    }
    List<ExternalTupleSet> index = Lists.newArrayList();
    if (indexTables.isEmpty()) {
        System.out.println("No Index found");
    } else {
        for (String table : indexTables.keySet()) {
            String indexSparqlString = indexTables.get(table);
            index.add(new AccumuloIndexSet(indexSparqlString, c, table));
        }
    }
    return index;
}

19. IndexWritingToolTest#testIndexWrite2()

@Test
public void testIndexWrite2() {
    Connector accCon = null;
    Instance inst;
    String[] args = new String[7];
    args[0] = "src/test/resources/ResultsFile1.txt";
    args[1] = "src/test/resources/testQuery2.txt";
    args[2] = "instance";
    args[3] = "mock";
    args[4] = "user";
    args[5] = "password";
    args[6] = "table";
    String query = null;
    try {
        query = FileUtils.readFileToString(new File(args[1]));
    } catch (IOException e1) {
        e1.printStackTrace();
    }
    try {
        inst = new MockInstance(args[2]);
        accCon = inst.getConnector(args[4], args[5].getBytes());
        if (accCon.tableOperations().exists(args[6])) {
            accCon.tableOperations().delete(args[6]);
        }
        accCon.tableOperations().create(args[6]);
    } catch (AccumuloException e) {
        e.printStackTrace();
    } catch (AccumuloSecurityException e) {
        e.printStackTrace();
    } catch (TableExistsException e) {
        e.printStackTrace();
    } catch (TableNotFoundException e) {
        e.printStackTrace();
    }
    int result = 5;
    try {
        result = ToolRunner.run(new IndexWritingTool(), args);
    } catch (Exception e) {
        e.printStackTrace();
    }
    Assert.assertEquals(0, result);
    Scanner scan = null;
    try {
        scan = accCon.createScanner("table", new Authorizations());
    } catch (TableNotFoundException e) {
        e.printStackTrace();
    }
    scan.setRange(new Range());
    int count = 0;
    for (Map.Entry<Key, Value> entry : scan) {
        String[] k = entry.getKey().getRow().toString().split("\u0000");
        String[] c = entry.getKey().getColumnFamily().toString().split("\u0000");
        if (count == 0) {
            Assert.assertEquals(k[0], "person1");
            Assert.assertEquals(k[1], "person2");
            Assert.assertEquals(k[2], "person3");
            Assert.assertEquals(c[0], "x");
            Assert.assertEquals(c[1], "y");
            Assert.assertEquals(c[2], "z");
        } else if (count == 2) {
            Assert.assertEquals(k[0], "person3");
            Assert.assertEquals(k[1], "person4");
            Assert.assertEquals(k[2], "person5");
            Assert.assertEquals(c[0], "x");
            Assert.assertEquals(c[1], "y");
            Assert.assertEquals(c[2], "z");
        } else if (count == 5) {
            Assert.assertEquals(k[0], "person6");
            Assert.assertEquals(k[1], "person7");
            Assert.assertEquals(k[2], "person8");
            Assert.assertEquals(c[0], "x");
            Assert.assertEquals(c[1], "y");
            Assert.assertEquals(c[2], "z");
        }
        count++;
        System.out.println(count);
    }
    Assert.assertEquals(9, count);
}

20. ExternalSailExample#main()

public static void main(String[] args) throws Exception {
    Sail s = new MemoryStore();
    SailRepository repo = new SailRepository(s);
    repo.initialize();
    SailRepositoryConnection conn = repo.getConnection();
    URI sub = new URIImpl("uri:entity");
    URI subclass = new URIImpl("uri:class");
    URI obj = new URIImpl("uri:obj");
    URI talksTo = new URIImpl("uri:talksTo");
    conn.add(sub, RDF.TYPE, subclass);
    conn.add(sub, RDFS.LABEL, new LiteralImpl("label"));
    conn.add(sub, talksTo, obj);
    URI sub2 = new URIImpl("uri:entity2");
    URI subclass2 = new URIImpl("uri:class2");
    URI obj2 = new URIImpl("uri:obj2");
    conn.add(sub2, RDF.TYPE, subclass2);
    conn.add(sub2, RDFS.LABEL, new LiteralImpl("label2"));
    conn.add(sub2, talksTo, obj2);
    // TODO Auto-generated method stub
    String indexSparqlString = //
    "" + //
    "SELECT ?e ?l ?c " + //
    "{" + //
    "  ?e a ?c . " + //
    "  ?e <http://www.w3.org/2000/01/rdf-schema#label> ?l " + //
    "}";
    conn.prepareTupleQuery(QueryLanguage.SPARQL, indexSparqlString).evaluate(new SPARQLResultsXMLWriter(System.out));
    SPARQLParser sp = new SPARQLParser();
    ParsedQuery pq = sp.parseQuery(indexSparqlString, null);
    System.out.println(pq);
    List<ExternalTupleSet> index = Lists.newArrayList();
    Connector accCon = new MockInstance().getConnector("root", "".getBytes());
    String tablename = "table";
    accCon.tableOperations().create(tablename);
    index.add(new AccumuloIndexSet(indexSparqlString, conn, accCon, tablename));
    String queryString = //
    "" + //
    "SELECT ?e ?c ?l ?o " + //
    "{" + //
    "  ?e a ?c . " + //
    "  ?e <http://www.w3.org/2000/01/rdf-schema#label> ?l . " + //
    "  ?e <uri:talksTo> ?o . " + //
    "}";
    conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(new SPARQLResultsXMLWriter(System.out));
    pq = sp.parseQuery(queryString, null);
    QueryModelTreePrinter mp = new QueryModelTreePrinter();
    pq.getTupleExpr().visit(mp);
    System.out.println(mp.getTreeString());
    System.out.println(pq.getTupleExpr());
    System.out.println("++++++++++++");
    ExternalProcessor processor = new ExternalProcessor(index);
    System.out.println(processor.process(pq.getTupleExpr()));
    System.out.println("----------------");
    Sail processingSail = new ExternalSail(s, processor);
    SailRepository smartSailRepo = new SailRepository(processingSail);
    smartSailRepo.initialize();
    smartSailRepo.getConnection().prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(new SPARQLResultsXMLWriter(System.out));
}

21. Restore#run()

Project: lumify
Source File: Restore.java
public void run(RestoreOptions restoreOptions) throws AccumuloSecurityException, AccumuloException, IOException, TableExistsException, URISyntaxException, InterruptedException {
    LOGGER.info("Begin restore");
    Connector conn = createAccumuloConnection(restoreOptions);
    FileSystem fileSystem = getHdfsFileSystem(restoreOptions);
    if (restoreOptions.getHdfsRestoreTempDirectory() != null) {
        // TODO: exclude the securegraphHdfsOverflowDirectory
        LOGGER.info("Copying backup files from restore directory: " + restoreOptions.getHdfsRestoreDirectory() + " to temp directory: " + restoreOptions.getHdfsRestoreTempDirectory());
        FileUtil.copy(fileSystem, new Path(restoreOptions.getHdfsRestoreDirectory()), fileSystem, new Path(restoreOptions.getHdfsRestoreTempDirectory()), false, fileSystem.getConf());
        List<String> tableNames = getTableList(fileSystem, restoreOptions.getHdfsRestoreTempDirectory());
        restoreTables(conn, tableNames, restoreOptions.getHdfsRestoreTempDirectory());
        LOGGER.info("Deleting restored temp directory: " + restoreOptions.getHdfsRestoreTempDirectory());
        fileSystem.delete(new Path(restoreOptions.getHdfsRestoreTempDirectory()), true);
    } else {
        List<String> tableNames = getTableList(fileSystem, restoreOptions.getHdfsRestoreDirectory());
        restoreTables(conn, tableNames, restoreOptions.getHdfsRestoreDirectory());
        LOGGER.warn("Deleting restored and consumed restore directory: " + restoreOptions.getHdfsRestoreDirectory());
        fileSystem.delete(new Path(restoreOptions.getHdfsRestoreDirectory()), true);
    }
    restoreSecuregraphHdfsOverflowDirectory(fileSystem, restoreOptions.getHdfsRestoreDirectory(), restoreOptions.getSecuregraphHdfsOverflowDirectory());
    LOGGER.info("Restore complete");
}

22. AccumuloConnectorTest#testConnectorAll()

Project: mrgeo
Source File: AccumuloConnectorTest.java
@Ignore
@Test
@Category(UnitTest.class)
public void testConnectorAll() throws Exception {
    Connector conn = AccumuloConnector.getConnector(inst, zoo, u, pw);
    Assert.assertNotNull(conn);
}

23. AccumuloGeoTableTest#testGetTile()

Project: mrgeo
Source File: AccumuloGeoTableTest.java
@Test
@Category(UnitTest.class)
public void testGetTile() throws Exception {
    ZooKeeperInstance zkinst = new ZooKeeperInstance(inst, zoo);
    PasswordToken pwTok = new PasswordToken(pw.getBytes());
    Connector conn = zkinst.getConnector(u, pwTok);
    Assert.assertNotNull(conn);
    PasswordToken token = new PasswordToken(pw.getBytes());
    Authorizations auths = new Authorizations(authsStr.split(","));
    long start = 0;
    long end = Long.MAX_VALUE;
    Key sKey = AccumuloUtils.toKey(start);
    Key eKey = AccumuloUtils.toKey(end);
    Range r = new Range(sKey, eKey);
    Scanner s = conn.createScanner("paris4", auths);
    s.fetchColumnFamily(new Text(Integer.toString(10)));
    s.setRange(r);
    Iterator<Entry<Key, Value>> it = s.iterator();
    while (it.hasNext()) {
        Entry<Key, Value> ent = it.next();
        if (ent == null) {
            return;
        }
        System.out.println("current key   = " + AccumuloUtils.toLong(ent.getKey().getRow()));
        System.out.println("current value = " + ent.getValue().getSize());
    }
}

24. AbstractAccumuloInputOperator#emitTuples()

@Override
public void emitTuples() {
    Connector conn = store.getConnector();
    Scanner scan = getScanner(conn);
    for (Entry<Key, Value> entry : scan) {
        T tuple = getTuple(entry);
        outputPort.emit(tuple);
    }
}

25. TestAccumuloPigCluster#loadTestData()

Project: pig
Source File: TestAccumuloPigCluster.java
private void loadTestData() throws Exception {
    ZooKeeperInstance inst = new ZooKeeperInstance(accumuloCluster.getInstanceName(), accumuloCluster.getZooKeepers());
    Connector c = inst.getConnector("root", new PasswordToken("password"));
    TableOperations tops = c.tableOperations();
    if (!tops.exists("airports")) {
        tops.create("airports");
    }
    if (!tops.exists("flights")) {
        tops.create("flights");
    }
    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxWriteThreads(1);
    config.setMaxLatency(100000L, TimeUnit.MILLISECONDS);
    config.setMaxMemory(10000L);
    BatchWriter bw = c.createBatchWriter("airports", config);
    try {
        int i = 1;
        for (Map<String, String> record : AIRPORTS) {
            Mutation m = new Mutation(Integer.toString(i));
            for (Entry<String, String> entry : record.entrySet()) {
                m.put(entry.getKey(), "", entry.getValue());
            }
            bw.addMutation(m);
            i++;
        }
    } finally {
        if (null != bw) {
            bw.close();
        }
    }
    bw = c.createBatchWriter("flights", config);
    try {
        int i = 1;
        for (Map<String, String> record : flightData) {
            Mutation m = new Mutation(Integer.toString(i));
            for (Entry<String, String> entry : record.entrySet()) {
                m.put(entry.getKey(), "", entry.getValue());
            }
            bw.addMutation(m);
            i++;
        }
    } finally {
        if (null != bw) {
            bw.close();
        }
    }
}

26. ExternalIndexMain#main()

public static void main(String[] args) throws Exception {
    Preconditions.checkArgument(args.length == 6, "java " + ExternalIndexMain.class.getCanonicalName() + " sparqlFile cbinstance cbzk cbuser cbpassword rdfTablePrefix.");
    final String sparqlFile = args[0];
    instStr = args[1];
    zooStr = args[2];
    userStr = args[3];
    passStr = args[4];
    tablePrefix = args[5];
    String queryString = FileUtils.readFileToString(new File(sparqlFile));
    // Look for Extra Indexes
    Instance inst = new ZooKeeperInstance(instStr, zooStr);
    Connector c = inst.getConnector(userStr, passStr.getBytes());
    System.out.println("Searching for Indexes");
    Map<String, String> indexTables = Maps.newLinkedHashMap();
    for (String table : c.tableOperations().list()) {
        if (table.startsWith(tablePrefix + "INDEX_")) {
            Scanner s = c.createScanner(table, new Authorizations());
            s.setRange(Range.exact(new Text("~SPARQL")));
            for (Entry<Key, Value> e : s) {
                indexTables.put(table, e.getValue().toString());
            }
        }
    }
    List<ExternalTupleSet> index = Lists.newArrayList();
    if (indexTables.isEmpty()) {
        System.out.println("No Index found");
    } else {
        for (String table : indexTables.keySet()) {
            String indexSparqlString = indexTables.get(table);
            System.out.println("====================== INDEX FOUND ======================");
            System.out.println(" table : " + table);
            System.out.println(" sparql : ");
            System.out.println(indexSparqlString);
            index.add(new AccumuloIndexSet(indexSparqlString, c, table));
        }
    }
    // Connect to Rya
    Sail s = getRyaSail();
    SailRepository repo = new SailRepository(s);
    repo.initialize();
    // Perform Query
    CountingTupleQueryResultHandler count = new CountingTupleQueryResultHandler();
    SailRepositoryConnection conn;
    if (index.isEmpty()) {
        conn = repo.getConnection();
    } else {
        ExternalProcessor processor = new ExternalProcessor(index);
        Sail processingSail = new ExternalSail(s, processor);
        SailRepository smartSailRepo = new SailRepository(processingSail);
        smartSailRepo.initialize();
        conn = smartSailRepo.getConnection();
    }
    startTime = System.currentTimeMillis();
    lastTime = startTime;
    System.out.println("Query Started");
    conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(count);
    System.out.println("Count of Results found : " + count.i);
    System.out.println("Total query time (s) : " + (System.currentTimeMillis() - startTime) / 1000.);
}

27. FluoSparkHelper#bulkImportKvToAccumulo()

/**
   * Bulk import Key/Value data into Accumulo
   *
   * @param data Key/value data to import
   * @param accumuloTable Accumulo table used for import
   * @param opts Bulk import options
   */
public void bulkImportKvToAccumulo(JavaPairRDD<Key, Value> data, String accumuloTable, BulkImportOptions opts) {
    Path tempDir = getTempDir(opts);
    Connector conn = chooseConnector(opts);
    try {
        if (hdfs.exists(tempDir)) {
            throw new IllegalArgumentException("HDFS temp dir already exists: " + tempDir.toString());
        }
        hdfs.mkdirs(tempDir);
        Path dataDir = new Path(tempDir.toString() + "/data");
        Path failDir = new Path(tempDir.toString() + "/fail");
        hdfs.mkdirs(failDir);
        // save data to HDFS
        Job job = Job.getInstance(hadoopConfig);
        AccumuloFileOutputFormat.setOutputPath(job, dataDir);
        // must use new API here as saveAsHadoopFile throws exception
        data.saveAsNewAPIHadoopFile(dataDir.toString(), Key.class, Value.class, AccumuloFileOutputFormat.class, job.getConfiguration());
        // bulk import data to Accumulo
        log.info("Wrote data for bulk import to HDFS temp directory: {}", dataDir);
        conn.tableOperations().importDirectory(accumuloTable, dataDir.toString(), failDir.toString(), false);
        // throw exception if failures directory contains files
        if (hdfs.listFiles(failDir, true).hasNext()) {
            throw new IllegalStateException("Bulk import failed!  Found files that failed to import " + "in failures directory: " + failDir);
        }
        log.info("Successfully bulk imported data in {} to '{}' Accumulo table", dataDir, accumuloTable);
        // delete data directory
        hdfs.delete(tempDir, true);
        log.info("Deleted HDFS temp directory created for bulk import: {}", tempDir);
    // @formatter:off
    } catch (IOException | TableNotFoundException | AccumuloException | AccumuloSecurityException e) {
        throw new IllegalStateException(e);
    }
}

28. TableOperations#compactTransient()

public static void compactTransient(FluoConfiguration fluoConfig, RowRange tRange) throws Exception {
    Connector conn = getConnector(fluoConfig);
    conn.tableOperations().compact(fluoConfig.getAccumuloTable(), new Text(tRange.getStart().toArray()), new Text(tRange.getEnd().toArray()), true, true);
}

29. TableOperations#optimizeTable()

/**
   * This method performs all recommended post-initialization actions.
   */
public static void optimizeTable(FluoConfiguration fluoConfig, Pirtos pirtos) throws Exception {
    Connector conn = getConnector(fluoConfig);
    TreeSet<Text> splits = new TreeSet<>();
    for (Bytes split : pirtos.getSplits()) {
        splits.add(new Text(split.toArray()));
    }
    String table = fluoConfig.getAccumuloTable();
    conn.tableOperations().addSplits(table, splits);
    if (pirtos.getTabletGroupingRegex() != null && !pirtos.getTabletGroupingRegex().isEmpty()) {
        try {
            // setting this prop first intentionally because it should fail in 1.6
            conn.tableOperations().setProperty(table, RGB_PATTERN_PROP, pirtos.getTabletGroupingRegex());
            conn.tableOperations().setProperty(table, RGB_DEFAULT_PROP, "none");
            conn.tableOperations().setProperty(table, TABLE_BALANCER_PROP, RGB_CLASS);
        } catch (AccumuloException e) {
            logger.warn("Unable to setup regex balancer (this is expected to fail in Accumulo 1.6.X) : " + e.getMessage());
            logger.debug("Unable to setup regex balancer (this is expected to fail in Accumulo 1.6.X)", e);
        }
    }
}

30. TableOperations#getConnector()

private static Connector getConnector(FluoConfiguration fluoConfig) throws Exception {
    ZooKeeperInstance zki = new ZooKeeperInstance(new ClientConfiguration().withInstance(fluoConfig.getAccumuloInstance()).withZkHosts(fluoConfig.getAccumuloZookeepers()));
    Connector conn = zki.getConnector(fluoConfig.getAccumuloUser(), new PasswordToken(fluoConfig.getAccumuloPassword()));
    return conn;
}

31. FluoAdminImpl#accumuloTableExists()

Project: incubator-fluo
Source File: FluoAdminImpl.java
public boolean accumuloTableExists() {
    Connector conn = AccumuloUtil.getConnector(config);
    return conn.tableOperations().exists(config.getAccumuloTable());
}

32. FluoAdminImpl#initialize()

Project: incubator-fluo
Source File: FluoAdminImpl.java
@Override
public void initialize(InitOpts opts) throws AlreadyInitializedException, TableExistsException {
    Preconditions.checkArgument(!ZookeeperUtil.parseRoot(config.getInstanceZookeepers()).equals("/"), "The Zookeeper connection string (set by 'org.apache.fluo.client.zookeeper.connect') " + " must have a chroot suffix.");
    if (zookeeperInitialized() && !opts.getClearZookeeper()) {
        throw new AlreadyInitializedException("Fluo application already initialized at " + config.getAppZookeepers());
    }
    Connector conn = AccumuloUtil.getConnector(config);
    boolean tableExists = conn.tableOperations().exists(config.getAccumuloTable());
    if (tableExists && !opts.getClearTable()) {
        throw new TableExistsException("Accumulo table already exists " + config.getAccumuloTable());
    }
    if (tableExists) {
        logger.info("The Accumulo table '{}' will be dropped and created as requested by user", config.getAccumuloTable());
        try {
            conn.tableOperations().delete(config.getAccumuloTable());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    try {
        if (rootCurator.checkExists().forPath(appRootDir) != null) {
            logger.info("Clearing Fluo '{}' application in Zookeeper at {}", config.getApplicationName(), config.getAppZookeepers());
            rootCurator.delete().deletingChildrenIfNeeded().forPath(appRootDir);
        }
    } catch (KeeperException.NoNodeException nne) {
    } catch (Exception e) {
        logger.error("An error occurred deleting Zookeeper root of [" + config.getAppZookeepers() + "], error=[" + e.getMessage() + "]");
        throw new RuntimeException(e);
    }
    try {
        initialize(conn);
        updateSharedConfig();
        if (!config.getAccumuloClasspath().trim().isEmpty()) {
            // TODO add fluo version to context name to make it unique
            String contextName = "fluo";
            conn.instanceOperations().setProperty(AccumuloProps.VFS_CONTEXT_CLASSPATH_PROPERTY + "fluo", config.getAccumuloClasspath());
            conn.tableOperations().setProperty(config.getAccumuloTable(), AccumuloProps.TABLE_CLASSPATH, contextName);
        }
        conn.tableOperations().setProperty(config.getAccumuloTable(), AccumuloProps.TABLE_BLOCKCACHE_ENABLED, "true");
    } catch (NodeExistsException nee) {
        throw new AlreadyInitializedException();
    } catch (Exception e) {
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw new RuntimeException(e);
    }
}

33. AppRunner#scanAccumulo()

Project: incubator-fluo
Source File: AppRunner.java
private long scanAccumulo(ScanOptions options, FluoConfiguration sConfig) {
    System.out.println("Scanning data in Accumulo directly for '" + sConfig.getApplicationName() + "' application.");
    Connector conn = AccumuloUtil.getConnector(sConfig);
    ScannerConfiguration scanConfig = null;
    try {
        scanConfig = buildScanConfig(options);
    } catch (IllegalArgumentException e) {
        System.err.println(e.getMessage());
        System.exit(-1);
    }
    long entriesFound = 0;
    try {
        Scanner scanner = conn.createScanner(sConfig.getAccumuloTable(), Authorizations.EMPTY);
        scanner.setRange(SpanUtil.toRange(scanConfig.getSpan()));
        for (Column col : scanConfig.getColumns()) {
            if (col.isQualifierSet()) {
                scanner.fetchColumn(ByteUtil.toText(col.getFamily()), ByteUtil.toText(col.getQualifier()));
            } else {
                scanner.fetchColumnFamily(ByteUtil.toText(col.getFamily()));
            }
        }
        for (String entry : Iterables.transform(scanner, new FluoFormatter())) {
            System.out.println(entry);
            entriesFound++;
        }
    } catch (Exception e) {
        System.out.println("Scan failed - " + e.getMessage());
    }
    return entriesFound;
}

34. TestAccumuloVertexFormat#testAccumuloInputOutput()

/*
     Write a simple parent-child directed graph to Accumulo.
     Run a job which reads the values
     into subclasses that extend AccumuloVertex I/O formats.
     Check the output after the job.
     */
@Test
public void testAccumuloInputOutput() throws Exception {
    if (System.getProperty("prop.mapred.job.tracker") != null) {
        if (log.isInfoEnabled())
            log.info("testAccumuloInputOutput: " + "Ignore this test if not local mode.");
        return;
    }
    File jarTest = new File(System.getProperty("prop.jarLocation"));
    if (!jarTest.exists()) {
        fail("Could not find Giraph jar at " + "location specified by 'prop.jarLocation'. " + "Make sure you built the main Giraph artifact?.");
    }
    //Write out vertices and edges out to a mock instance.
    MockInstance mockInstance = new MockInstance(INSTANCE_NAME);
    Connector c = mockInstance.getConnector("root", new byte[] {});
    c.tableOperations().create(TABLE_NAME);
    BatchWriter bw = c.createBatchWriter(TABLE_NAME, 10000L, 1000L, 4);
    Mutation m1 = new Mutation(new Text("0001"));
    m1.put(FAMILY, CHILDREN, new Value("0002".getBytes()));
    bw.addMutation(m1);
    Mutation m2 = new Mutation(new Text("0002"));
    m2.put(FAMILY, CHILDREN, new Value("0003".getBytes()));
    bw.addMutation(m2);
    if (log.isInfoEnabled())
        log.info("Writing mutations to Accumulo table");
    bw.close();
    Configuration conf = new Configuration();
    conf.set(AccumuloVertexOutputFormat.OUTPUT_TABLE, TABLE_NAME);
    /*
        Very important to initialize the formats before
        sending configuration to the GiraphJob. Otherwise
        the internally constructed Job in GiraphJob will
        not have the proper context initialization.
         */
    AccumuloInputFormat.setInputInfo(conf, USER, "".getBytes(), TABLE_NAME, new Authorizations());
    AccumuloInputFormat.setMockInstance(conf, INSTANCE_NAME);
    AccumuloOutputFormat.setOutputInfo(conf, USER, PASSWORD, true, null);
    AccumuloOutputFormat.setMockInstance(conf, INSTANCE_NAME);
    GiraphJob job = new GiraphJob(conf, getCallingMethodName());
    setupConfiguration(job);
    GiraphConfiguration giraphConf = job.getConfiguration();
    giraphConf.setVertexClass(EdgeNotification.class);
    giraphConf.setVertexInputFormatClass(AccumuloEdgeInputFormat.class);
    giraphConf.setVertexOutputFormatClass(AccumuloEdgeOutputFormat.class);
    HashSet<Pair<Text, Text>> columnsToFetch = new HashSet<Pair<Text, Text>>();
    columnsToFetch.add(new Pair<Text, Text>(FAMILY, CHILDREN));
    AccumuloInputFormat.fetchColumns(job.getConfiguration(), columnsToFetch);
    if (log.isInfoEnabled())
        log.info("Running edge notification job using Accumulo input");
    assertTrue(job.run(true));
    Scanner scanner = c.createScanner(TABLE_NAME, new Authorizations());
    scanner.setRange(new Range("0002", "0002"));
    scanner.fetchColumn(FAMILY, OUTPUT_FIELD);
    boolean foundColumn = false;
    if (log.isInfoEnabled())
        log.info("Verify job output persisted correctly.");
    //make sure we found the qualifier.
    assertTrue(scanner.iterator().hasNext());
    //now we check to make sure the expected value from the job persisted correctly.
    for (Map.Entry<Key, Value> entry : scanner) {
        Text row = entry.getKey().getRow();
        assertEquals("0002", row.toString());
        Value value = entry.getValue();
        assertEquals("0001", ByteBufferUtil.toString(ByteBuffer.wrap(value.get())));
        foundColumn = true;
    }
}

35. MapReduceIT#testIngestOSMPBF()

Project: geowave
Source File: MapReduceIT.java
@Test
public void testIngestOSMPBF() throws Exception {
    TestUtils.deleteAll(dataStoreOptions);
    // NOTE: This will probably fail unless you bump up the memory for the
    // tablet
    // servers, for whatever reason, using the
    // miniAccumuloConfig.setMemory() function.
    MapReduceTestEnvironment mrEnv = MapReduceTestEnvironment.getInstance();
    // TODO: for now this only works with accumulo, generalize the data
    // store usage
    AccumuloStoreTestEnvironment accumuloEnv = AccumuloStoreTestEnvironment.getInstance();
    String hdfsPath = mrEnv.getHdfsBaseDirectory() + "/osm_stage/";
    StageOSMToHDFSCommand stage = new StageOSMToHDFSCommand();
    stage.setParameters(TEST_DATA_BASE_DIR, mrEnv.getHdfs(), hdfsPath);
    stage.execute(new ManualOperationParams());
    Connector conn = new ZooKeeperInstance(accumuloEnv.getAccumuloInstance(), accumuloEnv.getZookeeper()).getConnector(accumuloEnv.getAccumuloUser(), new PasswordToken(accumuloEnv.getAccumuloPassword()));
    Authorizations auth = new Authorizations(new String[] { "public" });
    conn.securityOperations().changeUserAuthorizations(accumuloEnv.getAccumuloUser(), auth);
    IngestOSMToGeoWaveCommand ingest = new IngestOSMToGeoWaveCommand();
    ingest.setParameters(mrEnv.getHdfs(), hdfsPath, null);
    ingest.setInputStoreOptions(dataStoreOptions);
    ingest.getIngestOptions().setJobName("ConversionTest");
    // Execute for node's ways, and relations.
    ingest.getIngestOptions().setMapperType("NODE");
    ingest.execute(new ManualOperationParams());
    System.out.println("finished accumulo ingest Node");
    ingest.getIngestOptions().setMapperType("WAY");
    ingest.execute(new ManualOperationParams());
    System.out.println("finished accumulo ingest Way");
    ingest.getIngestOptions().setMapperType("RELATION");
    ingest.execute(new ManualOperationParams());
    System.out.println("finished accumulo ingest Relation");
}

36. AccumuloOptionsTest#setUp()

Project: geowave
Source File: AccumuloOptionsTest.java
@Before
public void setUp() {
    final MockInstance mockInstance = new MockInstance();
    Connector mockConnector = null;
    try {
        mockConnector = mockInstance.getConnector("root", new PasswordToken(new byte[0]));
    } catch (final AccumuloException | AccumuloSecurityException e) {
        LOGGER.error("Failed to create mock accumulo connection", e);
    }
    accumuloOperations = new BasicAccumuloOperations(mockConnector);
    indexStore = new AccumuloIndexStore(accumuloOperations);
    adapterStore = new AccumuloAdapterStore(accumuloOperations);
    statsStore = new AccumuloDataStatisticsStore(accumuloOperations);
    secondaryIndexDataStore = new AccumuloSecondaryIndexDataStore(accumuloOperations, new AccumuloOptions());
    mockDataStore = new AccumuloDataStore(indexStore, adapterStore, statsStore, secondaryIndexDataStore, new AccumuloAdapterIndexMappingStore(accumuloOperations), accumuloOperations, accumuloOptions);
}

37. AccumuloDataStoreStatsTest#setUp()

@Before
public void setUp() {
    final MockInstance mockInstance = new MockInstance();
    Connector mockConnector = null;
    try {
        mockConnector = mockInstance.getConnector("root", new PasswordToken(new byte[0]));
    } catch (final AccumuloException | AccumuloSecurityException e) {
        LOGGER.error("Failed to create mock accumulo connection", e);
    }
    accumuloOperations = new BasicAccumuloOperations(mockConnector);
    indexStore = new AccumuloIndexStore(accumuloOperations);
    adapterStore = new AccumuloAdapterStore(accumuloOperations);
    statsStore = new AccumuloDataStatisticsStore(accumuloOperations);
    secondaryIndexDataStore = new AccumuloSecondaryIndexDataStore(accumuloOperations, new AccumuloOptions());
    mockDataStore = new AccumuloDataStore(indexStore, adapterStore, statsStore, secondaryIndexDataStore, new AccumuloAdapterIndexMappingStore(accumuloOperations), accumuloOperations, accumuloOptions);
}

38. ConnectorPool#getConnector()

Project: geowave
Source File: ConnectorPool.java
public synchronized Connector getConnector(final String zookeeperUrl, final String instanceName, final String userName, final String password) throws AccumuloException, AccumuloSecurityException {
    final ConnectorConfig config = new ConnectorConfig(zookeeperUrl, instanceName, userName, password);
    Connector connector = connectorCache.get(config);
    if (connector == null) {
        final Instance inst = new ZooKeeperInstance(instanceName, zookeeperUrl);
        connector = inst.getConnector(userName, password);
        connectorCache.put(config, connector);
    }
    return connector;
}
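
A hypothetical call site for the pool above, assuming a ConnectorPool reference named pool and placeholder connection values; a second request with identical settings returns the cached Connector rather than opening a new one:

Connector first = pool.getConnector("zkhost1:2181", "myInstance", "user", "password");
Connector again = pool.getConnector("zkhost1:2181", "myInstance", "user", "password");
assert first == again; // cache hit: the same Connector instance is reused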

39. SimpleIngestTest#setUp()

Project: geowave
Source File: SimpleIngestTest.java
@Before
public void setUp() {
    final MockInstance mockInstance = new MockInstance();
    Connector mockConnector = null;
    try {
        mockConnector = mockInstance.getConnector(AccumuloUser, AccumuloPass);
    } catch (final AccumuloException | AccumuloSecurityException e) {
        LOGGER.error("Failed to create mock accumulo connection", e);
    }
    accumuloOperations = new BasicAccumuloOperations(mockConnector);
    indexStore = new AccumuloIndexStore(accumuloOperations);
    adapterStore = new AccumuloAdapterStore(accumuloOperations);
    statsStore = new AccumuloDataStatisticsStore(accumuloOperations);
    mockDataStore = new AccumuloDataStore(indexStore, adapterStore, statsStore, new AccumuloSecondaryIndexDataStore(accumuloOperations), new AccumuloAdapterIndexMappingStore(accumuloOperations), accumuloOperations, accumuloOptions);
    accumuloOptions.setCreateTable(true);
    accumuloOptions.setUseAltIndex(true);
    accumuloOptions.setPersistDataStatistics(true);
}

40. TableUtils#createTable()

Project: Gaffer
Source File: TableUtils.java
/**
     * Creates a table for Gaffer data and enables the correct Bloom filter;
     * removes the versioning iterator and adds an aggregator iterator and the
     * {@link org.apache.accumulo.core.iterators.user.AgeOffFilter} for the
     * specified time period.
     *
     * @param store the accumulo store
     * @throws StoreException       failure to create an accumulo connection or add iterator settings
     * @throws TableExistsException failure to create the table
     */
public static synchronized void createTable(final AccumuloStore store) throws StoreException, TableExistsException {
    // Create table
    final Connector connector = store.getConnection();
    final String tableName = store.getProperties().getTable();
    if (connector.tableOperations().exists(tableName)) {
        return;
    }
    try {
        connector.tableOperations().create(tableName);
        final String repFactor = store.getProperties().getTableFileReplicationFactor();
        if (null != repFactor) {
            connector.tableOperations().setProperty(tableName, Property.TABLE_FILE_REPLICATION.getKey(), repFactor);
        }
        // Enable Bloom filters using ElementFunctor
        LOGGER.info("Enabling Bloom filter on table");
        connector.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
        connector.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), store.getKeyPackage().getKeyFunctor().getClass().getName());
        LOGGER.info("Bloom filter enabled");
        // Remove versioning iterator from table for all scopes
        LOGGER.info("Removing versioning iterator");
        final EnumSet<IteratorScope> iteratorScopes = EnumSet.allOf(IteratorScope.class);
        connector.tableOperations().removeIterator(tableName, "vers", iteratorScopes);
        LOGGER.info("Removed Versioning iterator");
        // Add Combiner iterator to table for all scopes
        LOGGER.info("Adding Aggregator iterator to table for all scopes");
        connector.tableOperations().attachIterator(tableName, store.getKeyPackage().getIteratorFactory().getAggregatorIteratorSetting(store));
        LOGGER.info("Added Aggregator iterator to table for all scopes");
        if (store.getProperties().getEnableValidatorIterator()) {
            // Add validator iterator to table for all scopes
            LOGGER.info("Adding Validator iterator to table for all scopes");
            connector.tableOperations().attachIterator(tableName, store.getKeyPackage().getIteratorFactory().getValidatorIteratorSetting(store));
            LOGGER.info("Added Validator iterator to table for all scopes");
        } else {
            LOGGER.info("Validator iterator has been disabled");
        }
    } catch (final AccumuloSecurityException | TableNotFoundException | AccumuloException | IteratorSettingException e) {
        throw new StoreException(e.getMessage(), e);
    }
    addUpdateUtilsTable(store);
}