com.google.common.util.concurrent.RateLimiter

Here are examples of the Java API class com.google.common.util.concurrent.RateLimiter, taken from open source projects.
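
All of the examples below follow the same basic pattern, so here first is a minimal self-contained sketch of the core API they rely on: RateLimiter.create(permitsPerSecond) builds a limiter, acquire() blocks until a permit is available and returns the seconds spent waiting, and tryAcquire(timeout, unit) is the non-blocking variant. The class name and printouts are only for illustration.

import com.google.common.util.concurrent.RateLimiter;

import java.util.concurrent.TimeUnit;

public class RateLimiterBasics {

    public static void main(String[] args) {
        // At most 5 permits per second, smoothed by Guava over the lifetime of the limiter.
        RateLimiter limiter = RateLimiter.create(5.0);

        for (int i = 0; i < 10; i++) {
            // acquire() blocks until a permit is free and returns the time spent waiting, in seconds.
            double waited = limiter.acquire();
            System.out.printf("task %d started after waiting %.3fs%n", i, waited);
        }

        // Non-blocking alternative: run the optional task only if a permit frees up within 100 ms.
        if (limiter.tryAcquire(100, TimeUnit.MILLISECONDS)) {
            System.out.println("ran optional task");
        } else {
            System.out.println("skipped optional task");
        }
    }
}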

1. ApiGatewaySdkApiImporter#buildResourceList()

// todo: optimize number of calls to this as it is an expensive operation
protected List<Resource> buildResourceList(RestApi api) {
    List<Resource> resourceList = new ArrayList<>();
    Resources resources = api.getResources();
    resourceList.addAll(resources.getItem());
    LOG.debug("Building list of resources. Stack trace: ", new Throwable());
    final RateLimiter rl = RateLimiter.create(2);
    while (resources._isLinkAvailable("next")) {
        rl.acquire();
        resources = resources.getNext();
        resourceList.addAll(resources.getItem());
    }
    return resourceList;
}

2. AbstractCompactionStrategy#getScanners()

/**
 * Returns a list of KeyScanners given sstables and ranges on which to scan.
 * The default implementation simply grabs one SSTableScanner per sstable, but overriding this method
 * allows for a more memory-efficient solution if we know the sstables don't overlap (see
 * LeveledCompactionStrategy for instance).
 */
@SuppressWarnings("resource")
public ScannerList getScanners(Collection<SSTableReader> sstables, Collection<Range<Token>> ranges) {
    RateLimiter limiter = CompactionManager.instance.getRateLimiter();
    ArrayList<ISSTableScanner> scanners = new ArrayList<ISSTableScanner>();
    try {
        for (SSTableReader sstable : sstables) scanners.add(sstable.getScanner(ranges, limiter));
    } catch (Throwable t) {
        try {
            new ScannerList(scanners).close();
        } catch (Throwable t2) {
            t.addSuppressed(t2);
        }
        throw t;
    }
    return new ScannerList(scanners);
}
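
Note that this strategy does not create a limiter of its own; it reuses the single instance held by CompactionManager, so every concurrent compaction draws from one shared throughput budget. RateLimiter is safe to use from multiple threads, which is what makes that work. Below is a minimal sketch of the shared-limiter pattern; SharedLimiterSketch and doThrottledWork are illustrative names, not part of Cassandra.

import com.google.common.util.concurrent.RateLimiter;

public class SharedLimiterSketch {

    // One limiter shared by every worker: their combined rate is capped at 100 permits/sec.
    private static final RateLimiter SHARED = RateLimiter.create(100.0);

    public static void main(String[] args) throws InterruptedException {
        Runnable worker = () -> {
            for (int i = 0; i < 50; i++) {
                SHARED.acquire(); // thread-safe; blocks each caller as needed
                doThrottledWork();
            }
        };
        Thread a = new Thread(worker, "worker-a");
        Thread b = new Thread(worker, "worker-b");
        a.start();
        b.start();
        a.join();
        b.join();
    }

    private static void doThrottledWork() {
        // placeholder for the real work, e.g. scanning a chunk of an sstable
    }
}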

3. DistributedConsensusLoadTest#startTest()

private void startTest() {
    stopped.set(false);
    RateLimiter limiter = RateLimiter.create(rate);
    Semaphore s = new Semaphore(100);
    while (!stopped.get()) {
        limiter.acquire();
        s.acquireUninterruptibly();
        counters.get(RandomUtils.nextInt(TOTAL_COUNTERS)).incrementAndGet().whenComplete((r, e) -> {
            s.release();
            if (e == null) {
                increments.incrementAndGet();
            }
        });
    }
}
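
This load test combines two different controls: the RateLimiter caps how fast new operations are started, while the Semaphore caps how many are outstanding at once (the completion callback releases the slot). Here is a self-contained sketch of that pairing, using CompletableFuture in place of the test's counter client; the class name and fakeRemoteCall are hypothetical stand-ins.

import com.google.common.util.concurrent.RateLimiter;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class RateAndConcurrencySketch {

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(8);
        RateLimiter limiter = RateLimiter.create(200.0); // start at most 200 operations per second
        Semaphore inFlight = new Semaphore(100);         // allow at most 100 outstanding operations

        for (int i = 0; i < 1000; i++) {
            limiter.acquire();                 // throttles the submission rate
            inFlight.acquireUninterruptibly(); // blocks if too many operations are outstanding
            CompletableFuture
                    .runAsync(RateAndConcurrencySketch::fakeRemoteCall, pool)
                    .whenComplete((r, e) -> inFlight.release()); // free the slot on completion
        }
        pool.shutdown();
    }

    private static void fakeRemoteCall() {
        // stand-in for the asynchronous increment performed by the load test
    }
}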

4. AbstractCompactionStrategy#getScanners()

/**
 * Returns a list of KeyScanners given sstables and a range on which to scan.
 * The default implementation simply grabs one SSTableScanner per sstable, but overriding this method
 * allows for a more memory-efficient solution if we know the sstables don't overlap (see
 * LeveledCompactionStrategy for instance).
 */
public ScannerList getScanners(Collection<SSTableReader> sstables, Range<Token> range) {
    RateLimiter limiter = CompactionManager.instance.getRateLimiter();
    ArrayList<ISSTableScanner> scanners = new ArrayList<ISSTableScanner>();
    try {
        for (SSTableReader sstable : sstables) scanners.add(sstable.getScanner(range, limiter));
    } catch (Throwable t) {
        try {
            new ScannerList(scanners).close();
        } catch (Throwable t2) {
            t.addSuppressed(t2);
        }
        throw t;
    }
    return new ScannerList(scanners);
}

5. BatchlogManager#replayFailedBatches()

Project: cassandra
Source File: BatchlogManager.java
private void replayFailedBatches() {
    logger.trace("Started replayFailedBatches");
    // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
    // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272).
    int endpointsCount = StorageService.instance.getTokenMetadata().getAllEndpoints().size();
    if (endpointsCount <= 0) {
        logger.trace("Replay cancelled as there are no peers in the ring.");
        return;
    }
    int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / endpointsCount;
    RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);
    UUID limitUuid = UUIDGen.maxTimeUUID(System.currentTimeMillis() - getBatchlogTimeout());
    ColumnFamilyStore store = Keyspace.open(SystemKeyspace.NAME).getColumnFamilyStore(SystemKeyspace.BATCHES);
    int pageSize = calculatePageSize(store);
    // There cannot be any live content where token(id) <= token(lastReplayedUuid) as every processed batch is
    // deleted, but the tombstoned content may still be present in the tables. To avoid walking over it we specify
    // token(id) > token(lastReplayedUuid) as part of the query.
    String query = String.format("SELECT id, mutations, version FROM %s.%s WHERE token(id) > token(?) AND token(id) <= token(?)", SystemKeyspace.NAME, SystemKeyspace.BATCHES);
    UntypedResultSet batches = executeInternalWithPaging(query, pageSize, lastReplayedUuid, limitUuid);
    processBatchlogEntries(batches, pageSize, rateLimiter);
    lastReplayedUuid = limitUuid;
    logger.trace("Finished replayFailedBatches");
}
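
The interesting detail here (shared with examples 8 and 9 below) is that the limiter is sized in bytes per second rather than operations per second: the throttle from cassandra.yaml is converted from KB/s to bytes/s, a setting of 0 maps to Double.MAX_VALUE so acquire() effectively never blocks, and each replayed mutation acquires as many permits as its serialized size. A stripped-down sketch of that idiom follows, with a made-up payload type and send() helper.

import com.google.common.util.concurrent.RateLimiter;

import java.util.List;

public class ByteThrottleSketch {

    static void replay(List<byte[]> payloads, int throttleInKB) {
        // 0 means "disabled": Double.MAX_VALUE makes every acquire() return immediately.
        RateLimiter bytesPerSecond =
                RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024.0);

        for (byte[] payload : payloads) {
            // One permit per byte, so larger payloads wait proportionally longer.
            // acquire() requires at least one permit, hence the max().
            bytesPerSecond.acquire(Math.max(1, payload.length));
            send(payload);
        }
    }

    private static void send(byte[] payload) {
        // stand-in for the actual mutation delivery
    }
}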

6. TestNonBlockingReadsMultiReader#testMultiReaders()

@Test(timeout = 60000)
public void testMultiReaders() throws Exception {
    String name = "distrlog-multireaders";
    final RateLimiter limiter = RateLimiter.create(1000);
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    DistributedLogManager dlmwrite = createNewDLM(confLocal, name);
    final AsyncLogWriter writer = dlmwrite.startAsyncLogSegmentNonPartitioned();
    FutureUtils.result(writer.write(DLMTestUtil.getLogRecordInstance(0)));
    FutureUtils.result(writer.write(DLMTestUtil.getLogRecordInstance(1)));
    final AtomicInteger writeCount = new AtomicInteger(2);
    DistributedLogManager dlmread = createNewDLM(conf, name);
    BKSyncLogReaderDLSN reader0 = (BKSyncLogReaderDLSN) dlmread.getInputStream(0);
    try {
        ReaderThread[] readerThreads = new ReaderThread[1];
        readerThreads[0] = new ReaderThread("reader0-non-blocking", reader0, false);
        // readerThreads[1] = new ReaderThread("reader1-non-blocking", reader0, false);
        final AtomicBoolean running = new AtomicBoolean(true);
        Thread writerThread = new Thread("WriteThread") {

            @Override
            public void run() {
                try {
                    long txid = 2;
                    DLSN dlsn = DLSN.InvalidDLSN;
                    while (running.get()) {
                        limiter.acquire();
                        long curTxId = txid++;
                        dlsn = FutureUtils.result(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
                        writeCount.incrementAndGet();
                        if (curTxId % 1000 == 0) {
                            LOG.info("writer write {}", curTxId);
                        }
                    }
                    LOG.info("Completed writing record at {}", dlsn);
                    Utils.close(writer);
                } catch (DLInterruptedException die) {
                    Thread.currentThread().interrupt();
                } catch (IOException e) {
                }
            }
        };
        for (ReaderThread rt : readerThreads) {
            rt.start();
        }
        writerThread.start();
        TimeUnit.SECONDS.sleep(5);
        LOG.info("Stopping writer");
        running.set(false);
        writerThread.join();
        LOG.info("Writer stopped after writing {} records, waiting for reader to complete", writeCount.get());
        while (writeCount.get() > (readerThreads[0].getReadCount())) {
            LOG.info("Write Count = {}, Read Count = {}, ReadAhead = {}", new Object[] { writeCount.get(), readerThreads[0].getReadCount(), reader0.getReadAheadPosition() });
            TimeUnit.MILLISECONDS.sleep(100);
        }
        assertEquals(writeCount.get(), (readerThreads[0].getReadCount()));
        for (ReaderThread readerThread : readerThreads) {
            readerThread.stopReading();
        }
    } finally {
        dlmwrite.close();
        reader0.close();
        dlmread.close();
    }
}

7. RecordGenerator#main()

Project: distributedlog
Source File: RecordGenerator.java
public static void main(String[] args) throws Exception {
    if (3 != args.length) {
        System.out.println(HELP);
        return;
    }
    String finagleNameStr = args[0];
    final String streamName = args[1];
    double rate = Double.parseDouble(args[2]);
    RateLimiter limiter = RateLimiter.create(rate);
    DistributedLogClient client = DistributedLogClientBuilder.newBuilder().clientId(ClientId.apply("record-generator")).name("record-generator").thriftmux(true).finagleNameStr(finagleNameStr).build();
    final CountDownLatch keepAliveLatch = new CountDownLatch(1);
    final AtomicLong numWrites = new AtomicLong(0);
    final AtomicBoolean running = new AtomicBoolean(true);
    while (running.get()) {
        limiter.acquire();
        String record = "record-" + System.currentTimeMillis();
        client.write(streamName, ByteBuffer.wrap(record.getBytes(UTF_8))).addEventListener(new FutureEventListener<DLSN>() {

            @Override
            public void onFailure(Throwable cause) {
                System.out.println("Encountered error on writing data");
                cause.printStackTrace(System.err);
                running.set(false);
                keepAliveLatch.countDown();
            }

            @Override
            public void onSuccess(DLSN value) {
                long numSuccesses = numWrites.incrementAndGet();
                if (numSuccesses % 100 == 0) {
                    System.out.println("Write " + numSuccesses + " records.");
                }
            }
        });
    }
    keepAliveLatch.await();
    client.close();
}
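
In this generator the rate is fixed at startup from a command-line argument. If the throttle ever needs to change while the process is running, RateLimiter also supports adjusting it in place with setRate(double), and getRate() reports the current value, so the limiter does not have to be rebuilt. A tiny illustrative snippet (the halving trigger is hypothetical):

import com.google.common.util.concurrent.RateLimiter;

public class AdjustableRateSketch {

    public static void main(String[] args) {
        RateLimiter limiter = RateLimiter.create(100.0); // initial rate, e.g. parsed from args
        limiter.acquire();

        // Halve the throughput at runtime; subsequent acquire() calls honour the new rate.
        limiter.setRate(limiter.getRate() / 2);
        System.out.println("new rate: " + limiter.getRate() + " permits/sec");
    }
}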

8. BatchlogManager#replayAllFailedBatches()

private void replayAllFailedBatches() throws ExecutionException, InterruptedException {
    logger.debug("Started replayAllFailedBatches");
    // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
    // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272).
    int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / StorageService.instance.getTokenMetadata().getAllEndpoints().size();
    RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);
    UntypedResultSet page = executeInternal(String.format("SELECT id, data, written_at, version FROM %s.%s LIMIT %d", Keyspace.SYSTEM_KS, SystemKeyspace.BATCHLOG_CF, PAGE_SIZE));
    while (!page.isEmpty()) {
        UUID id = processBatchlogPage(page, rateLimiter);
        if (page.size() < PAGE_SIZE)
            // we've exhausted the batchlog, next query would be empty.
            break;
        page = executeInternal(String.format("SELECT id, data, written_at, version FROM %s.%s WHERE token(id) > token(?) LIMIT %d", Keyspace.SYSTEM_KS, SystemKeyspace.BATCHLOG_CF, PAGE_SIZE), id);
    }
    cleanup();
    logger.debug("Finished replayAllFailedBatches");
}

9. HintedHandOffManager#doDeliverHintsToEndpoint()

/*
     * 1. Get the key of the endpoint we need to handoff
     * 2. For each column, deserialize the mutation and send it to the endpoint
     * 3. Delete the column if the write was successful
     * 4. Force a flush
     */
private void doDeliverHintsToEndpoint(InetAddress endpoint) {
    // find the hints for the node using its token.
    UUID hostId = Gossiper.instance.getHostId(endpoint);
    logger.info("Started hinted handoff for host: {} with IP: {}", hostId, endpoint);
    final ByteBuffer hostIdBytes = ByteBuffer.wrap(UUIDGen.decompose(hostId));
    DecoratedKey epkey = StorageService.getPartitioner().decorateKey(hostIdBytes);
    final AtomicInteger rowsReplayed = new AtomicInteger(0);
    Composite startColumn = Composites.EMPTY;
    int pageSize = calculatePageSize();
    logger.debug("Using pageSize of {}", pageSize);
    // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
    // max rate is scaled by the number of nodes in the cluster (CASSANDRA-5272).
    int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB() / (StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1);
    RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);
    delivery: while (true) {
        long now = System.currentTimeMillis();
        QueryFilter filter = QueryFilter.getSliceFilter(epkey, SystemKeyspace.HINTS_CF, startColumn, Composites.EMPTY, false, pageSize, now);
        ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), (int) (now / 1000));
        if (pagingFinished(hintsPage, startColumn)) {
            logger.info("Finished hinted handoff of {} rows to endpoint {}", rowsReplayed, endpoint);
            break;
        }
        // check if node is still alive and we should continue delivery process
        if (!FailureDetector.instance.isAlive(endpoint)) {
            logger.info("Endpoint {} died during hint delivery; aborting ({} delivered)", endpoint, rowsReplayed);
            break;
        }
        List<WriteResponseHandler> responseHandlers = Lists.newArrayList();
        for (final Cell hint : hintsPage) {
            // check if hints delivery has been paused during the process
            if (hintedHandOffPaused) {
                logger.debug("Hints delivery process is paused, aborting");
                break delivery;
            }
            // skip hints that have already been deleted; removeDeleted() above won't have purged such a cell,
            // since (even with gcgs=0) it's still a "relevant" tombstone.
            if (!hint.isLive())
                continue;
            startColumn = hint.name();
            int version = Int32Type.instance.compose(hint.name().get(1));
            DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value()));
            Mutation mutation;
            try {
                mutation = Mutation.serializer.deserialize(in, version);
            } catch (UnknownColumnFamilyException e) {
                logger.debug("Skipping delivery of hint for deleted columnfamily", e);
                deleteHint(hostIdBytes, hint.name(), hint.timestamp());
                continue;
            } catch (IOException e) {
                throw new AssertionError(e);
            }
            for (UUID cfId : mutation.getColumnFamilyIds()) {
                if (hint.timestamp() <= SystemKeyspace.getTruncatedAt(cfId)) {
                    logger.debug("Skipping delivery of hint for truncated columnfamily {}", cfId);
                    mutation = mutation.without(cfId);
                }
            }
            if (mutation.isEmpty()) {
                deleteHint(hostIdBytes, hint.name(), hint.timestamp());
                continue;
            }
            MessageOut<Mutation> message = mutation.createMessage();
            rateLimiter.acquire(message.serializedSize(MessagingService.current_version));
            Runnable callback = new Runnable() {

                public void run() {
                    rowsReplayed.incrementAndGet();
                    deleteHint(hostIdBytes, hint.name(), hint.timestamp());
                }
            };
            WriteResponseHandler responseHandler = new WriteResponseHandler(endpoint, WriteType.SIMPLE, callback);
            MessagingService.instance().sendRR(message, endpoint, responseHandler, false);
            responseHandlers.add(responseHandler);
        }
        for (WriteResponseHandler handler : responseHandlers) {
            try {
                handler.get();
            } catch (WriteTimeoutException e) {
                logger.info("Timed out replaying hints to {}; aborting ({} delivered)", endpoint, rowsReplayed);
                break delivery;
            }
        }
    }
    // Flush all the tombstones to disk
    hintStore.forceBlockingFlush();
}

10. StressAction#run()

Project: stratio-cassandra
Source File: StressAction.java
public void run() {
    // creating keyspace and column families
    settings.maybeCreateKeyspaces();
    output.println("Sleeping 2s...");
    Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
    if (!settings.command.noWarmup)
        warmup(settings.command.getFactory(settings));
    if (settings.command.truncate == SettingsCommand.TruncateWhen.ONCE)
        settings.command.truncateTables(settings);
    // TODO : move this to a new queue wrapper that gates progress based on a poisson (or configurable) distribution
    RateLimiter rateLimiter = null;
    if (settings.rate.opRateTargetPerSecond > 0)
        rateLimiter = RateLimiter.create(settings.rate.opRateTargetPerSecond);
    boolean success;
    if (settings.rate.minThreads > 0)
        success = runMulti(settings.rate.auto, rateLimiter);
    else
        success = null != run(settings.command.getFactory(settings), settings.rate.threadCount, settings.command.count, settings.command.duration, rateLimiter, settings.command.durationUnits, output);
    if (success)
        output.println("END");
    else
        output.println("FAILURE");
    settings.disconnect();
}
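
In this last example the limiter is optional: it stays null unless an op-rate target was configured, so anything that consumes it has to guard each acquire() with a null check. A hedged sketch of that pattern follows; OptionalLimiterSketch and doOperation are illustrative names, not part of the stress tool.

import com.google.common.util.concurrent.RateLimiter;

public class OptionalLimiterSketch {

    // null means no throttling was configured
    private final RateLimiter rateLimiter;

    OptionalLimiterSketch(double opRateTargetPerSecond) {
        this.rateLimiter = opRateTargetPerSecond > 0
                ? RateLimiter.create(opRateTargetPerSecond)
                : null;
    }

    void runOnce() {
        if (rateLimiter != null)
            rateLimiter.acquire(); // only pay the throttling cost when a target rate was set
        doOperation();
    }

    private void doOperation() {
        // stand-in for one stress operation
    }
}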