com.codahale.metrics.Snapshot

Here are the examples of the java api com.codahale.metrics.Snapshot taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

85 Examples

19 Source : MetricsProviderActor.java
with MIT License
from ria-ee

/**
 * Converts a Dropwizard {@link Snapshot} into a {@code HistogramDto}, copying every
 * percentile and summary statistic.
 *
 * @param name     metric name carried into the DTO
 * @param snapshot the distribution to convert
 * @return a populated histogram DTO
 */
private MetricDto toHistogramDto(String name, Snapshot snapshot) {
    HistogramDto dto = new HistogramDto(
            name,
            snapshot.get75thPercentile(),
            snapshot.get95thPercentile(),
            snapshot.get98thPercentile(),
            snapshot.get99thPercentile(),
            snapshot.get999thPercentile(),
            snapshot.getMax(),
            snapshot.getMean(),
            snapshot.getMedian(),
            snapshot.getMin(),
            snapshot.getStdDev());
    return dto;
}

19 Source : DeltaHdrHistogramReservoirTest.java
with Apache License 2.0
from nosqlbench

// @Test
// public void testStartAndEndTimes() throws IOException {
// //        File tempFile = new File("/tmp/test.hdr");
// File tempFile = File.createTempFile("loghisto", "hdr", new File("/tmp"));
// tempFile.deleteOnExit();
// 
// HistoLoggerConfig hlc = new HistoLoggerConfig("test1session",tempFile, Pattern.compile(".*"));
// 
// long beforeFirstHistoCreated=System.currentTimeMillis();
// DeltaHdrHistogramReservoir dhhr = new DeltaHdrHistogramReservoir("test1metric",
// new Recorder(4));
// 
// dhhr.attachLogWriter(hlc.getLogWriter());
// 
// writeAndSnapshot(dhhr,1,new long[]{1,20,300,4000,50000});
// writeAndSnapshot(dhhr,2,new long[]{60000,7000,800,90,1});
// long afterLastLoggedValue=System.currentTimeMillis();
// 
// HistogramLogReader hlr = new HistogramLogReader(tempFile.getAbsolutePath());
// double startTimeSec = hlr.getStartTimeSec();
// 
// EncodableHistogram histo1 = hlr.nextIntervalHistogram();
// long i1start = histo1.getStartTimeStamp();
// long i1end = histo1.getEndTimeStamp();
// 
// EncodableHistogram histo2 = hlr.nextIntervalHistogram();
// long i2start = histo2.getStartTimeStamp();
// long i2end = histo2.getEndTimeStamp();
// assertThat(i1start).isGreaterThanOrEqualTo(beforeFirstHistoCreated);
// assertThat(i1end).isGreaterThan(i1start);
// assertThat(i2start).isGreaterThanOrEqualTo(i1end);
// assertThat(i2end).isLessThanOrEqualTo(afterLastLoggedValue);
// }
/**
 * Records the given values into the reservoir, sleeping {@code interDelay} ms between
 * updates, then takes a snapshot. The snapshot itself is discarded — presumably taking
 * it is what flushes the delta reservoir's interval to the attached log writer
 * (TODO confirm against DeltaHdrHistogramReservoir).
 *
 * @param dhhr       reservoir under test
 * @param interDelay pause between updates, in milliseconds
 * @param longs      values to record
 */
private void writeAndSnapshot(DeltaHdrHistogramReservoir dhhr, int interDelay, long[] longs) {
    for (long aLong : longs) {
        dhhr.update(aLong);
        try {
            Thread.sleep(interDelay);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe the interruption.
            Thread.currentThread().interrupt();
        }
    }
    Snapshot snapshot = dhhr.getSnapshot();
}

19 Source : ConvenientSnapshot.java
with Apache License 2.0
from nosqlbench

public clreplaced ConvenientSnapshot extends Snapshot {

    private double NS_PER_S = 1000000000.0D;

    private double NS_PER_MS = 1000000.0D;

    private double NS_PER_US = 1000.0D;

    private Snapshot snapshot;

    ConvenientSnapshot(Snapshot snapshot) {
        this.snapshot = snapshot;
    }

    @Override
    public double getValue(double quantile) {
        return snapshot.getValue(quantile);
    }

    @Override
    public long[] getValues() {
        return snapshot.getValues();
    }

    @Override
    public int size() {
        return snapshot.size();
    }

    @Override
    public long getMax() {
        return snapshot.getMax();
    }

    @Override
    public double getMean() {
        return snapshot.getMean();
    }

    @Override
    public long getMin() {
        return snapshot.getMin();
    }

    @Override
    public double getStdDev() {
        return snapshot.getStdDev();
    }

    @Override
    public void dump(OutputStream output) {
        snapshot.dump(output);
    }

    public double getP50s() {
        return getValue(0.5D) / NS_PER_S;
    }

    public double getP75s() {
        return getValue(0.75D) / NS_PER_S;
    }

    public double getP90s() {
        return getValue(0.90D) / NS_PER_S;
    }

    public double getP95s() {
        return getValue(0.95D) / NS_PER_S;
    }

    public double getP98s() {
        return getValue(0.98D) / NS_PER_S;
    }

    public double getP99s() {
        return getValue(0.99D) / NS_PER_S;
    }

    public double getP999s() {
        return getValue(0.999D) / NS_PER_S;
    }

    public double getP9999s() {
        return getValue(0.9999D) / NS_PER_S;
    }

    public double getP50ms() {
        return getValue(0.5D) / NS_PER_MS;
    }

    public double getP75ms() {
        return getValue(0.75D) / NS_PER_MS;
    }

    public double getP90ms() {
        return getValue(0.90D) / NS_PER_MS;
    }

    public double getP95ms() {
        return getValue(0.95D) / NS_PER_MS;
    }

    public double getP98ms() {
        return getValue(0.98D) / NS_PER_MS;
    }

    public double getP99ms() {
        return getValue(0.99D) / NS_PER_MS;
    }

    public double getP999ms() {
        return getValue(0.999D) / NS_PER_MS;
    }

    public double getP9999ms() {
        return getValue(0.9999D) / NS_PER_MS;
    }

    public double getP50us() {
        return getValue(0.5D) / NS_PER_US;
    }

    public double getP75us() {
        return getValue(0.75D) / NS_PER_US;
    }

    public double getP90us() {
        return getValue(0.90D) / NS_PER_US;
    }

    public double getP95us() {
        return getValue(0.95D) / NS_PER_US;
    }

    public double getP98us() {
        return getValue(0.98D) / NS_PER_US;
    }

    public double getP99us() {
        return getValue(0.99D) / NS_PER_US;
    }

    public double getP999us() {
        return getValue(0.999D) / NS_PER_US;
    }

    public double getP9999us() {
        return getValue(0.9999D) / NS_PER_US;
    }

    public double getP50ns() {
        return getValue(0.5D);
    }

    public double getP75ns() {
        return getValue(0.75D);
    }

    public double getP90ns() {
        return getValue(0.90D);
    }

    public double getP95ns() {
        return getValue(0.95D);
    }

    public double getP98ns() {
        return getValue(0.98D);
    }

    public double getP99ns() {
        return getValue(0.99D);
    }

    public double getP999ns() {
        return getValue(0.999D);
    }

    public double getP9999ns() {
        return getValue(0.9999D);
    }
}

19 Source : DropwizardHistogramStatistics.java
with Apache License 2.0
from ljygz

/**
 * Dropwizard histogram statistics implementation returned by {@link DropwizardHistogramWrapper}.
 * The statistics clreplaced wraps a {@link Snapshot} instance and forwards the method calls accordingly.
 */
clreplaced DropwizardHistogramStatistics extends HistogramStatistics {

    private final com.codahale.metrics.Snapshot snapshot;

    DropwizardHistogramStatistics(com.codahale.metrics.Snapshot snapshot) {
        this.snapshot = snapshot;
    }

    @Override
    public double getQuantile(double quantile) {
        return snapshot.getValue(quantile);
    }

    @Override
    public long[] getValues() {
        return snapshot.getValues();
    }

    @Override
    public int size() {
        return snapshot.size();
    }

    @Override
    public double getMean() {
        return snapshot.getMean();
    }

    @Override
    public double getStdDev() {
        return snapshot.getStdDev();
    }

    @Override
    public long getMax() {
        return snapshot.getMax();
    }

    @Override
    public long getMin() {
        return snapshot.getMin();
    }
}

19 Source : MetricReport.java
with Apache License 2.0
from chubaostream

/**
 * @author LiYue
 * Date: 2019-08-06
 */
public clreplaced MetricReport implements JMetricReport {

    private final long counter;

    private final long traffic;

    private final Snapshot latency;

    private final long start, end;

    private final String name;

    public MetricReport(String name, long counter, long traffic, Snapshot latency, long start, long end) {
        this.counter = counter;
        this.traffic = traffic;
        this.latency = latency;
        this.start = start;
        this.end = end;
        this.name = name;
    }

    @Override
    public long trafficTotal() {
        return traffic;
    }

    @Override
    public long requestsTotal() {
        return counter;
    }

    @Override
    public long trafficPs() {
        long div = (end - start) / 1000000000L;
        return div > 0 ? traffic / div : 0L;
    }

    @Override
    public long requestsPs() {
        long div = (end - start) / 1000000000L;
        return div > 0 ? counter / div : 0L;
    }

    @Override
    public double[] latency() {
        return new double[] { latency.getMean(), latency.getValue(0.5), latency.getValue(0.90), latency.getValue(0.95), latency.getValue(0.99), latency.getValue(0.999), latency.getValue(0.9999), latency.getMax() };
    }

    @Override
    public long reportTime() {
        return end;
    }

    @Override
    public String name() {
        return name;
    }
}

19 Source : GraphiteMetricFormatter.java
with Apache License 2.0
from centro

/**
 * Renders the distribution statistics of a {@link Snapshot} as Graphite plaintext lines.
 *
 * @param name base metric name; statistic suffixes (max, mean, p50, ...) are joined onto it
 * @param snapshot the distribution to render
 * @param timestamp epoch timestamp attached to every emitted line
 * @param convertValuesToDurations when true each value is passed through convertDuration —
 *        used for timers (see formatTimer), while histograms pass false (see formatHistogram)
 * @return all statistic lines concatenated
 */
private String formatSamplingSnapshot(String name, Snapshot snapshot, long timestamp, boolean convertValuesToDurations) {
    StringBuilder outputBuilder = new StringBuilder();
    outputBuilder.append(formatLine(MetricNamingUtil.join(name, "max"), convertValuesToDurations ? convertDuration(snapshot.getMax()) : snapshot.getMax(), timestamp));
    outputBuilder.append(formatLine(MetricNamingUtil.join(name, "mean"), convertValuesToDurations ? convertDuration(snapshot.getMean()) : snapshot.getMean(), timestamp));
    outputBuilder.append(formatLine(MetricNamingUtil.join(name, "min"), convertValuesToDurations ? convertDuration(snapshot.getMin()) : snapshot.getMin(), timestamp));
    outputBuilder.append(formatLine(MetricNamingUtil.join(name, "stddev"), convertValuesToDurations ? convertDuration(snapshot.getStdDev()) : snapshot.getStdDev(), timestamp));
    outputBuilder.append(formatLine(MetricNamingUtil.join(name, "p50"), convertValuesToDurations ? convertDuration(snapshot.getMedian()) : snapshot.getMedian(), timestamp));
    outputBuilder.append(formatLine(MetricNamingUtil.join(name, "p75"), convertValuesToDurations ? convertDuration(snapshot.get75thPercentile()) : snapshot.get75thPercentile(), timestamp));
    outputBuilder.append(formatLine(MetricNamingUtil.join(name, "p95"), convertValuesToDurations ? convertDuration(snapshot.get95thPercentile()) : snapshot.get95thPercentile(), timestamp));
    outputBuilder.append(formatLine(MetricNamingUtil.join(name, "p98"), convertValuesToDurations ? convertDuration(snapshot.get98thPercentile()) : snapshot.get98thPercentile(), timestamp));
    outputBuilder.append(formatLine(MetricNamingUtil.join(name, "p99"), convertValuesToDurations ? convertDuration(snapshot.get99thPercentile()) : snapshot.get99thPercentile(), timestamp));
    outputBuilder.append(formatLine(MetricNamingUtil.join(name, "p999"), convertValuesToDurations ? convertDuration(snapshot.get999thPercentile()) : snapshot.get999thPercentile(), timestamp));
    return outputBuilder.toString();
}

19 Source : GraphiteMetricFormatter.java
with Apache License 2.0
from centro

/**
 * Renders a {@link Timer} as Graphite lines: first its metered rates, then its latency
 * distribution (converted to the configured duration unit).
 */
private String formatTimer(String name, Timer timer, long timestamp) {
    // Snapshot is taken up front, before the metered section is formatted.
    final Snapshot latencies = timer.getSnapshot();
    StringBuilder lines = new StringBuilder();
    lines.append(formatMetered(name, timer, timestamp));
    lines.append(formatSamplingSnapshot(name, latencies, timestamp, true));
    return lines.toString();
}

19 Source : IrisMetricsFormat.java
with Apache License 2.0
from arcus-smart-home

/**
 * Scales every statistic of the snapshot by {@code factor} and delegates to the
 * fully-expanded {@code toJson} overload.
 */
public static JsonObject toJson(String name, long count, Snapshot snap, List<TagValue> tags, double factor) {
    double min = snap.getMin() * factor;
    double max = snap.getMax() * factor;
    double mean = snap.getMean() * factor;
    double stdDev = snap.getStdDev() * factor;
    double median = snap.getMedian() * factor;
    double p75 = snap.get75thPercentile() * factor;
    double p95 = snap.get95thPercentile() * factor;
    double p98 = snap.get98thPercentile() * factor;
    double p99 = snap.get99thPercentile() * factor;
    double p999 = snap.get999thPercentile() * factor;
    return toJson(name, count, min, max, mean, stdDev, median, p75, p95, p98, p99, p999, tags);
}

19 Source : StatsCommand.java
with Apache License 2.0
from apache

/**
 * Builds one table row of file-size statistics for the given instant.
 * Columns: instant, min, 10th percentile, median, mean, 95th percentile, max,
 * sample count, standard deviation.
 */
public Comparable[] printFileSizeHistogram(String instantTime, Snapshot s) {
    Comparable[] row = new Comparable[9];
    row[0] = instantTime;
    row[1] = s.getMin();
    row[2] = s.getValue(0.1);
    row[3] = s.getMedian();
    row[4] = s.getMean();
    row[5] = s.get95thPercentile();
    row[6] = s.getMax();
    row[7] = s.size();
    row[8] = s.getStdDev();
    return row;
}

18 Source : DropwizardAdapterTest.java
with Apache License 2.0
from vladimir-bukhtoyarov

/**
 * Asserts that the GC pause-latency histogram registered for the given collector and
 * window has the expected count, minimum and maximum.
 */
private void checkHistogram(long count, long min, long max, String collector, String window) {
    String name = "gc." + collector + "." + window + ".pauseLatencyMillis";
    Snapshot snapshot = registry.histogram(name).getSnapshot();
    assertEquals(min, snapshot.getMin());
    assertEquals(max, snapshot.getMax());
    assertEquals(count, registry.histogram(name).getCount());
}

18 Source : TestHistoTypes.java
with Apache License 2.0
from nosqlbench

/**
 * Prints a percentile table (0..100%) comparing the expected value (pct * max)
 * against the value each snapshot reports at that quantile.
 * Note: {@code min} is accepted but not used by the current implementation.
 */
private void summary(long min, long max, Snapshot... snapshots) {
    for (int pctPoint = 0; pctPoint <= 100; pctPoint++) {
        double quantile = (double) pctPoint / 100.0D;
        double expected = quantile * max;
        System.out.format("% 3d %%p is % 11d : ", (long) (quantile * 100), (long) expected);
        for (Snapshot snap : snapshots) {
            System.out.format("% 10d ", (long) snap.getValue(quantile));
        }
        System.out.print("\n");
    }
}

18 Source : SlidingWindowHistogramReservoirTest.java
with Apache License 2.0
from HotelsDotCom

// Verifies the reservoir caches its snapshot instance until a new value arrives.
// (Method name typo "Unitl" kept — renaming a public test method is out of scope here.)
@Test
public void cachesTheSnapshotUnitlFurtherUpdates() {
    SlidingWindowHistogramReservoir reservoir = new SlidingWindowHistogramReservoir();
    reservoir.update(5);
    reservoir.update(6);
    reservoir.update(7);
    Snapshot snapshot1 = reservoir.getSnapshot();
    Snapshot snapshot2 = reservoir.getSnapshot();
    assertThat(snapshot1, sameInstance(snapshot2));
    reservoir.update(8);
    assertThat(reservoir.getSnapshot(), not(sameInstance(snapshot2)));
}

18 Source : UnixSocketClient.java
with Apache License 2.0
from datastax

/**
 * Reports a Dropwizard {@link Timer} over collectd (histogram + meter + Prometheus
 * buckets) and, unless the metric carries the insights filter tag, also as an Insights
 * timer event.
 *
 * @return number of metric payloads sent (3 for collectd, +1 for the Insights event)
 */
private int writeMetric(String name, String tags, Timer timer) {
    final Snapshot snapshot = timer.getSnapshot();
    // Lossy version of the stats
    double meanRate = convertRate(timer.getMeanRate());
    double min1Rate = convertRate(timer.getOneMinuteRate());
    double min5rate = convertRate(timer.getFiveMinuteRate());
    double min15rate = convertRate(timer.getFifteenMinuteRate());
    // Timer snapshot values are durations, so each is run through convertDuration.
    double max = convertDuration(snapshot.getMax());
    double mean = convertDuration(snapshot.getMean());
    double min = convertDuration(snapshot.getMin());
    double stddev = convertDuration(snapshot.getStdDev());
    double p50 = convertDuration(snapshot.getMedian());
    double p75 = convertDuration(snapshot.get75thPercentile());
    double p90 = convertDuration(snapshot.getValue(0.90));
    double p95 = convertDuration(snapshot.get95thPercentile());
    double p98 = convertDuration(snapshot.get98thPercentile());
    double p99 = convertDuration(snapshot.get99thPercentile());
    double p999 = convertDuration(snapshot.get999thPercentile());
    long count = timer.getCount();
    // Force all these to be filtered by insights since we send an equivalent event below
    reportCollectdHistogram(name, FILTER_INSIGHTS_TAG, count, max, mean, min, stddev, p50, p75, p90, p95, p98, p99, p999);
    reportCollectdMeter(name, FILTER_INSIGHTS_TAG, count, meanRate, min1Rate, min5rate, min15rate);
    Map<String, String> buckets = reportPrometheusTimer(name, FILTER_INSIGHTS_TAG, count, snapshot);
    int sent = 3;
    if (!tags.contains(FILTER_INSIGHTS_TAG)) {
        // Insights event keeps min/max truncated to longs — precision loss is accepted here.
        com.datastax.mcac.insights.metrics.Timer t = new com.datastax.mcac.insights.metrics.Timer(name, System.currentTimeMillis(), buckets, timer.getCount(), new SamplingStats((long) min, (long) max, mean, p50, p75, p95, p98, p99, p999, stddev), new RateStats(min1Rate, min5rate, min15rate, meanRate), rateUnit.name(), durationUnit.name());
        try {
            report(t);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        ++sent;
    }
    return sent;
}

17 Source : MetricsPool.java
with Apache License 2.0
from XiaoMi

/**
 * Appends the p99 and p999 gauges of the histogram, serialized as two JSON objects
 * separated by a comma, to {@code output}.
 * NOTE(review): this mutates the shared {@code theMetric} holder between emissions,
 * so concurrent calls would interleave its fields — appears not thread-safe; confirm
 * callers serialize access.
 *
 * @throws JSONException if serialization of a metric fails
 */
public void genJsonsFromHistogram(String name, Histogram hist, StringBuilder output) throws JSONException {
    theMetric.counterType = "GAUGE";
    Snapshot s = hist.getSnapshot();
    theMetric.metric = name + ".p99";
    theMetric.tags = getTableTag(name);
    theMetric.value = s.get99thPercentile();
    oneMetricToJson(theMetric, output);
    output.append(',');
    theMetric.metric = name + ".p999";
    theMetric.tags = getTableTag(name);
    theMetric.value = s.get999thPercentile();
    oneMetricToJson(theMetric, output);
}

17 Source : PointsNamespacePanel.java
with Apache License 2.0
from wavefrontHQ

/**
 * Adds the synthetic ".." parent row — rate (pps), accessed %, median/p75/p99 lag in
 * ms, and cardinality/range estimates — and optionally exports the same data to CSV.
 */
@Override
protected void addFirstRow(Node root, double factor, Collection<Node> nodes, Snapshot snapshot, boolean takeSnapshot) {
    // artificial ".."
    this.table.getTableModel().addRow(// artificial ".."
    "..", (Math.round(factor * root.getRate().getOneMinuteRate()) + "pps"), (Math.round(100.0 * root.getAccessed() / root.getRate().getCount()) + "%"), Math.round(snapshot.getMedian()) + "ms", Math.round(snapshot.get75thPercentile()) + "ms", Math.round(snapshot.get99thPercentile()) + "ms", String.valueOf(root.getEstimatedMetricCardinality()), String.valueOf(root.getEstimatedHostCardinality()), String.valueOf(root.getRange()));
    if (takeSnapshot)
        exportData(rootPath, root, factor, snapshot);
}

17 Source : PointsNamespacePanel.java
with Apache License 2.0
from wavefrontHQ

/**
 * Writes one CSV record (same columns as the on-screen table row) for the given node
 * and flushes the printer so partial exports survive an abrupt exit.
 * NOTE(review): I/O failures only go to stderr via printStackTrace — consider a logger.
 */
private void exportData(String namespace, Node node, double factor, Snapshot snapshot) {
    try {
        csvPrinter.printRecord(namespace, Math.round(factor * node.getRate().getOneMinuteRate()) + "pps", Math.round(100.0 * node.getAccessed() / node.getRate().getCount()) + "%", Math.round(snapshot.getMedian()) + "ms", Math.round(snapshot.get75thPercentile()) + "ms", Math.round(snapshot.get99thPercentile()) + "ms", String.valueOf(node.getEstimatedMetricCardinality()), String.valueOf(node.getEstimatedHostCardinality()), String.valueOf(node.getRange()));
        csvPrinter.flush();
    } catch (IOException e) {
        e.printStackTrace();
    }
}

17 Source : IdNamespacePanel.java
with Apache License 2.0
from wavefrontHQ

/**
 * Adds the synthetic ".." parent row for the id-namespace view: one-minute rate in
 * cps and the estimated metric cardinality. The snapshot argument is unused here.
 */
@Override
protected void addFirstRow(Node root, double factor, Collection<Node> nodes, Snapshot snapshot, boolean takeSnapshot) {
    // artificial ".."
    String rate = Math.round(factor * root.getRate().getOneMinuteRate()) + "cps";
    String cardinality = String.valueOf(root.getEstimatedMetricCardinality());
    this.table.getTableModel().addRow("..", rate, cardinality);
    if (takeSnapshot) {
        exportData(rootPath, root, factor);
    }
}

17 Source : DwMetricsDistributionSummary.java
with Apache License 2.0
from sofastack

/**
 * Samples the wrapped distribution and returns an {@link Indicator} carrying its
 * current mean, stamped with the wall-clock time.
 */
@Override
public Indicator measure() {
    final long timestamp = clock.wallTime();
    final double mean = impl.getSnapshot().getMean();
    return new Indicator(timestamp, id, mean);
}

17 Source : ActiveQuerySnapshot.java
with Apache License 2.0
from NationalSecurityAgency

/**
 * Renders a single-line diagnostic summary of this query snapshot: log name, query id,
 * active range count, elapsed/current call times, per-call-type num/max/avg/min stats
 * (the /1000000 divisors suggest ns-to-ms conversion — TODO confirm units), and, when
 * present, last-document byte/source/seek/next counters.
 * NOTE(review): "doreplacedent*" identifiers look like scraping artifacts of
 * "document*" — verify against the original source before renaming anything.
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("[").append(activeQueryLogName).append("]");
    sb.append("[").append(this.queryId).append("] ");
    sb.append("ranges=").append(this.numActiveRanges).append(" ");
    if (this.isInCall) {
        // While a call is in flight, show both total and current-call elapsed time.
        sb.append("(tot/cur) ").append(this.totalElapsedTime).append("/").append(this.currentCallTime).append(" ");
    } else {
        sb.append("(tot) ").append(this.totalElapsedTime).append(" ");
    }
    sb.append("(call num/max/avg/min) ");
    for (ActiveQuery.CallType type : ActiveQuery.CallType.values()) {
        String t = type.toString().toLowerCase();
        Snapshot s = this.snapshotMap.get(type);
        if (s != null) {
            sb.append(t).append("=");
            sb.append(getNumCalls(type)).append("/").append(s.getMax() / 1000000).append("/").append(Math.round(s.getMean()) / 1000000).append("/").append(s.getMin() / 1000000).append(" ");
        }
    }
    if (this.doreplacedentSizeBytes > 0) {
        sb.append("(lastDoc bytes/sources/seek/next) ");
        sb.append(this.doreplacedentSizeBytes).append("/");
        sb.append(this.lastSourceCount).append("/");
        sb.append(this.lastSeekCount).append("/");
        sb.append(this.lastNextCount);
    }
    return sb.toString();
}

17 Source : HadoopMetrics2Reporter.java
with Apache License 2.0
from Kyligence

/**
 * Add Dropwizard-Metrics value-distribution data to a Hadoop-Metrics2 record building, converting
 * the durations to the appropriate unit.
 *
 * @param builder A Hadoop-Metrics2 record builder.
 * @param name A base name for this record.
 * @param desc A description for this record.
 * @param snapshot The distribution of measured values.
 * @param count The number of values which were measured.
 */
private void addSnapshot(MetricsRecordBuilder builder, String name, String desc, Snapshot snapshot, long count) {
    // Emit the sample count as its own gauge, then delegate the distribution
    // statistics to the count-less overload.
    builder.addGauge(Interns.info(name + "_count", desc), count);
    addSnapshot(builder, name, desc, snapshot);
}

17 Source : SlidingWindowHistogramReservoirTest.java
with Apache License 2.0
from HotelsDotCom

// Verifies the cached snapshot stays valid for the full sliding window
// (12 intervals * 10 s = 120 s) and is invalidated one ms past that length.
@Test
public void invalidatesCachedSnapshotEverySlidingWindowTimeLength() {
    TestClock clock = new TestClock();
    SlidingWindowHistogram histogram = new SlidingWindowHistogram.Builder().numberOfIntervals(12).intervalDuration(10, SECONDS).autoResize(true).build();
    SlidingWindowHistogramReservoir reservoir = new SlidingWindowHistogramReservoir(histogram, clock);
    reservoir.update(5);
    reservoir.update(6);
    reservoir.update(7);
    Snapshot snapshot1 = reservoir.getSnapshot();
    Snapshot snapshot2 = reservoir.getSnapshot();
    clock.forward(120 * 1000);
    assertThat(snapshot1, sameInstance(snapshot2));
    clock.forward(1);
    assertThat(reservoir.getSnapshot(), not(sameInstance(snapshot2)));
}

17 Source : UnixSocketClient.java
with Apache License 2.0
from datastax

/**
 * Do not convertDuration on Histograms, they are not time based
 * and converting will lose the values entirely if max and min are truncated.
 *
 * Reports the histogram over collectd and, unless the metric carries the insights
 * filter tag, also as an Insights histogram event.
 *
 * @return number of payloads sent (1 for collectd, +1 for the Insights event)
 */
private int writeMetric(String name, String tags, Histogram histogram) {
    final Snapshot snapshot = histogram.getSnapshot();
    // Raw (unconverted) values — see the class comment above.
    long max = snapshot.getMax();
    double mean = snapshot.getMean();
    long min = snapshot.getMin();
    double stddev = snapshot.getStdDev();
    double p50 = snapshot.getMedian();
    double p75 = snapshot.get75thPercentile();
    double p90 = snapshot.getValue(0.90);
    double p95 = snapshot.get95thPercentile();
    double p98 = snapshot.get98thPercentile();
    double p99 = snapshot.get99thPercentile();
    double p999 = snapshot.get999thPercentile();
    // Force all these to be filtered by insights since we send an equivalent event below
    reportCollectdHistogram(name, FILTER_INSIGHTS_TAG, histogram.getCount(), max, mean, min, stddev, p50, p75, p90, p95, p98, p99, p999);
    int sent = 1;
    if (!tags.contains(FILTER_INSIGHTS_TAG)) {
        com.datastax.mcac.insights.metrics.Histogram h = new com.datastax.mcac.insights.metrics.Histogram(name, System.currentTimeMillis(), globalTags, histogram.getCount(), new SamplingStats(min, max, mean, p50, p75, p95, p98, p99, p999, stddev));
        try {
            report(h);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        ++sent;
    }
    return sent;
}

16 Source : PointsNamespacePanel.java
with Apache License 2.0
from wavefrontHQ

/**
 * Fills the table with up to ~1000 sorted namespace rows, tracking which row matches
 * the previously selected label so the selection can be restored afterwards.
 * Note: the {@code snapshot} parameter is reassigned inside the loop — each row uses
 * that node's own lag snapshot, not the one passed in.
 */
@Override
protected void addNodes(Node root, double factor, Collection<Node> nodes, Snapshot snapshot, String selectedLabel, boolean takeSnapshot) {
    Ordering<Node> ordering = Ordering.from(getComparator());
    if (reverseSort)
        ordering = ordering.reverse();
    List<Node> sorted = ordering.sortedCopy(nodes);
    int num = 0;
    int newLocation = 0;
    for (Node node : sorted) {
        snapshot = node.getLag().getSnapshot();
        String flattened = StringUtils.abbreviate(node.getFlattened(), 50);
        if (flattened.equals(selectedLabel)) {
            // +1 accounts for the artificial ".." row at index 0.
            newLocation = num + 1;
        }
        labelToNodeMap.put(flattened, node);
        table.getTableModel().addRow(flattened, (Math.round(factor * node.getRate().getOneMinuteRate()) + "pps"), (Math.round(100.0 * node.getAccessed() / node.getRate().getCount()) + "%"), Math.round(snapshot.getMedian()) + "ms", Math.round(snapshot.get75thPercentile()) + "ms", Math.round(snapshot.get99thPercentile()) + "ms", String.valueOf(node.getEstimatedMetricCardinality()), String.valueOf(node.getEstimatedHostCardinality()), String.valueOf(node.getRange()));
        if (takeSnapshot) {
            exportData(flattened, node, factor, snapshot);
        }
        num++;
        // Cap the rendered rows to keep the UI responsive.
        if (num > 1000)
            break;
    }
    table.setSelectedRow(newLocation);
}

16 Source : IdNamespacePanel.java
with Apache License 2.0
from wavefrontHQ

/**
 * Fills the table with up to ~1000 sorted id-namespace rows (rate in cps and metric
 * cardinality), remembering which row carries the previously selected label so the
 * selection can be restored. The snapshot argument is unused in this view.
 */
@Override
protected void addNodes(Node root, double factor, Collection<Node> nodes, Snapshot snapshot, String selectedLabel, boolean takeSnapshot) {
    Ordering<Node> order = Ordering.from(getComparator());
    if (reverseSort) {
        order = order.reverse();
    }
    int rowIndex = 0;
    int selectedRow = 0;
    for (Node node : order.sortedCopy(nodes)) {
        String label = StringUtils.abbreviate(node.getFlattened(), 50);
        if (label.equals(selectedLabel)) {
            // +1 accounts for the artificial ".." row at index 0.
            selectedRow = rowIndex + 1;
        }
        labelToNodeMap.put(label, node);
        table.getTableModel().addRow(label, (Math.round(factor * node.getRate().getOneMinuteRate()) + "cps"), String.valueOf(node.getEstimatedMetricCardinality()));
        if (takeSnapshot) {
            exportData(label, node, factor);
        }
        rowIndex++;
        if (rowIndex > 1000) {
            break;
        }
    }
    table.setSelectedRow(selectedRow);
}

16 Source : GraphiteMetricFormatter.java
with Apache License 2.0
from centro

/**
 * Renders a {@link Histogram} as Graphite lines: the sample count followed by the
 * distribution statistics (no duration conversion — histogram values are not times).
 */
private String formatHistogram(String name, Histogram histogram, long timestamp) {
    final Snapshot distribution = histogram.getSnapshot();
    StringBuilder lines = new StringBuilder();
    lines.append(formatLine(MetricNamingUtil.join(name, "count"), histogram.getCount(), timestamp));
    lines.append(formatSamplingSnapshot(name, distribution, timestamp, false));
    return lines.toString();
}

16 Source : CloudWatchReporter.java
with MIT License
from azagniotov

/**
 * Stages a CloudWatch {@link MetricDatum} that summarizes the raw snapshot as a
 * {@code StatisticSet} (sum / sample count / min / max), tagged with the global
 * dimensions plus a snapshot-summary type dimension. No-op when the metric is not
 * configured for reporting.
 */
private void stageMetricDatumWithRawSnapshot(final boolean metricConfigured, final String metricName, final Snapshot snapshot, final StandardUnit standardUnit, final List<MetricDatum> metricData) {
    if (!metricConfigured) {
        return;
    }
    final DimensionedName dimensionedName = DimensionedName.decode(metricName);
    double total = LongStream.of(snapshot.getValues()).sum();
    final StatisticSet statisticSet = StatisticSet.builder()
            .sum(total)
            .sampleCount((double) snapshot.size())
            .minimum((double) snapshot.getMin())
            .maximum((double) snapshot.getMax())
            .build();
    final Set<Dimension> dimensions = new LinkedHashSet<>(builder.globalDimensions);
    dimensions.add(Dimension.builder().name(DIMENSION_NAME_TYPE).value(DIMENSION_SNAPSHOT_SUMMARY).build());
    dimensions.addAll(dimensionedName.getDimensions());
    metricData.add(MetricDatum.builder().timestamp(Instant.ofEpochMilli(builder.clock.getTime())).metricName(dimensionedName.getName()).dimensions(dimensions).statisticValues(statisticSet).storageResolution(highResolution ? HIGH_RESOLUTION : STANDARD_RESOLUTION).unit(standardUnit).build());
}

16 Source : ApptuitDropwizardExports.java
with Apache License 2.0
from ApptuitAI

/**
 * Converts a {@link Sampling} metric's snapshot into a Prometheus SUMMARY family:
 * min/max/mean/stddev gauges, quantile samples (0.5, 0.75, 0.95, 0.98, 0.99, 0.999)
 * and a _count sample. Every snapshot value is scaled by {@code factor}
 * (presumably a time-unit conversion — TODO confirm at call sites).
 */
private MetricFamilySamples fromSnapshotAndCount(String dropwizardName, String durationSuffix, Sampling samplingObj, long count, double factor, String helpMessage) {
    Snapshot snapshot = samplingObj.getSnapshot();
    List<MetricFamilySamples.Sample> samples = Arrays.asList(sampleBuilder.createSample(dropwizardName, durationSuffix + "_min", null, null, snapshot.getMin() * factor), sampleBuilder.createSample(dropwizardName, durationSuffix + "_max", null, null, snapshot.getMax() * factor), sampleBuilder.createSample(dropwizardName, durationSuffix + "_mean", null, null, snapshot.getMean() * factor), sampleBuilder.createSample(dropwizardName, durationSuffix + "_stddev", null, null, snapshot.getStdDev() * factor), sampleBuilder.createSample(dropwizardName, durationSuffix, Collections.singletonList(QUANTILE_TAG_NAME), Collections.singletonList("0.5"), snapshot.getMedian() * factor), sampleBuilder.createSample(dropwizardName, durationSuffix, Collections.singletonList(QUANTILE_TAG_NAME), Collections.singletonList("0.75"), snapshot.get75thPercentile() * factor), sampleBuilder.createSample(dropwizardName, durationSuffix, Collections.singletonList(QUANTILE_TAG_NAME), Collections.singletonList("0.95"), snapshot.get95thPercentile() * factor), sampleBuilder.createSample(dropwizardName, durationSuffix, Collections.singletonList(QUANTILE_TAG_NAME), Collections.singletonList("0.98"), snapshot.get98thPercentile() * factor), sampleBuilder.createSample(dropwizardName, durationSuffix, Collections.singletonList(QUANTILE_TAG_NAME), Collections.singletonList("0.99"), snapshot.get99thPercentile() * factor), sampleBuilder.createSample(dropwizardName, durationSuffix, Collections.singletonList(QUANTILE_TAG_NAME), Collections.singletonList("0.999"), snapshot.get999thPercentile() * factor), sampleBuilder.createSample(dropwizardName, "_count", new ArrayList<>(), new ArrayList<>(), count));
    return new MetricFamilySamples(samples.get(0).name, Type.SUMMARY, helpMessage, samples);
}

16 Source : MetricUtils.java
with Apache License 2.0
from apache

// some snapshots represent time in ns, other snapshots represent raw values (eg. chunk size)
/**
 * Writes the snapshot's statistics into the entry writer, skipping keys rejected by
 * {@code propertyFilter}. When {@code ms} is true, keys get the *_MS suffix and values
 * are passed through nsToMs for nanosecond-to-millisecond conversion.
 */
static void addSnapshot(MapWriter.EntryWriter ew, Snapshot snapshot, Predicate<CharSequence> propertyFilter, boolean ms) {
    // Shared gate: only forward (key, value) pairs whose key passes the filter.
    BiConsumer<String, Object> filter = (k, v) -> {
        if (propertyFilter.test(k)) {
            ew.putNoEx(k, v);
        }
    };
    filter.accept((ms ? MIN_MS : MIN), nsToMs(ms, snapshot.getMin()));
    filter.accept((ms ? MAX_MS : MAX), nsToMs(ms, snapshot.getMax()));
    filter.accept((ms ? MEAN_MS : MEAN), nsToMs(ms, snapshot.getMean()));
    filter.accept((ms ? MEDIAN_MS : MEDIAN), nsToMs(ms, snapshot.getMedian()));
    filter.accept((ms ? STDDEV_MS : STDDEV), nsToMs(ms, snapshot.getStdDev()));
    filter.accept((ms ? P75_MS : P75), nsToMs(ms, snapshot.get75thPercentile()));
    filter.accept((ms ? P95_MS : P95), nsToMs(ms, snapshot.get95thPercentile()));
    filter.accept((ms ? P99_MS : P99), nsToMs(ms, snapshot.get99thPercentile()));
    filter.accept((ms ? P999_MS : P999), nsToMs(ms, snapshot.get999thPercentile()));
}

16 Source : MetricUtils.java
with Apache License 2.0
from apache

/**
 * Convert an instance of {@link Histogram}. NOTE: it's assumed that histogram contains non-time
 * based values that don't require unit conversion.
 * @param name metric name
 * @param histogram an instance of {@link Histogram}
 * @param propertyFilter limit what properties of a metric are returned
 * @param simple use simplified representation for complex metrics - instead of a (name, map)
 *             only the selected (name "." key, value) pairs will be produced.
 * @param consumer consumer that accepts produced objects
 */
static void convertHistogram(String name, Histogram histogram, Predicate<CharSequence> propertyFilter, boolean simple, String separator, BiConsumer<String, Object> consumer) {
    Snapshot snapshot = histogram.getSnapshot();
    if (simple) {
        // Simplified form: only the mean, keyed as "<name><separator>mean".
        if (propertyFilter.test(MEAN)) {
            consumer.accept(name + separator + MEAN, snapshot.getMean());
        }
    } else {
        // Full form: a lazy MapWriter holding count plus all distribution statistics.
        MapWriter writer = ew -> {
            String prop = "count";
            if (propertyFilter.test(prop)) {
                ew.putNoEx(prop, histogram.getCount());
            }
            // non-time based values
            addSnapshot(ew, snapshot, propertyFilter, false);
        };
        consumer.accept(name, writer);
    }
}

16 Source : RandomKeyGenerator.java
with Apache License 2.0
from apache

/**
 * Prints stats of {@link Freon} run to the PrintStream.
 * <p>
 * Per-operation averages are derived by dividing the accumulated thread-local
 * times by the thread pool size. When a JSON output directory is configured,
 * quantile/mean/stddev timings per {@code FreonOps} operation are also
 * serialized to a timestamped JSON file in that directory.
 *
 * @param out PrintStream
 */
private void printStats(PrintStream out) {
    // Wall-clock duration of the whole run; startTime was captured via System.nanoTime().
    long endTime = System.nanoTime() - startTime;
    String execTime = DurationFormatUtils.formatDuration(TimeUnit.NANOSECONDS.toMillis(endTime), DURATION_FORMAT);
    // Average per-thread time spent in each phase (total accumulated / pool size).
    long volumeTime = TimeUnit.NANOSECONDS.toMillis(volumeCreationTime.get()) / threadPoolSize;
    String prettyAverageVolumeTime = DurationFormatUtils.formatDuration(volumeTime, DURATION_FORMAT);
    long bucketTime = TimeUnit.NANOSECONDS.toMillis(bucketCreationTime.get()) / threadPoolSize;
    String prettyAverageBucketTime = DurationFormatUtils.formatDuration(bucketTime, DURATION_FORMAT);
    long averageKeyCreationTime = TimeUnit.NANOSECONDS.toMillis(keyCreationTime.get()) / threadPoolSize;
    String prettyAverageKeyCreationTime = DurationFormatUtils.formatDuration(averageKeyCreationTime, DURATION_FORMAT);
    long averageKeyWriteTime = TimeUnit.NANOSECONDS.toMillis(keyWriteTime.get()) / threadPoolSize;
    String prettyAverageKeyWriteTime = DurationFormatUtils.formatDuration(averageKeyWriteTime, DURATION_FORMAT);
    out.println();
    out.println("***************************************************");
    out.println("Status: " + (exception != null ? "Failed" : "Success"));
    out.println("Git Base Revision: " + VersionInfo.getRevision());
    out.println("Number of Volumes created: " + numberOfVolumesCreated);
    out.println("Number of Buckets created: " + numberOfBucketsCreated);
    out.println("Number of Keys added: " + numberOfKeysAdded);
    out.println("Ratis replication factor: " + factor.name());
    out.println("Ratis replication type: " + type.name());
    out.println("Average Time spent in volume creation: " + prettyAverageVolumeTime);
    out.println("Average Time spent in bucket creation: " + prettyAverageBucketTime);
    out.println("Average Time spent in key creation: " + prettyAverageKeyCreationTime);
    out.println("Average Time spent in key write: " + prettyAverageKeyWriteTime);
    out.println("Total bytes written: " + totalBytesWritten);
    // Validation stats are only meaningful when write validation was enabled.
    if (validateWrites) {
        out.println("Total number of writes validated: " + totalWritesValidated);
        out.println("Writes validated: " + (100.0 * totalWritesValidated / numberOfKeysAdded.get()) + " %");
        out.println("Successful validation: " + writeValidationSuccessCount);
        out.println("Unsuccessful validation: " + writeValidationFailureCount);
    }
    out.println("Total Execution time: " + execTime);
    out.println("***************************************************");
    if (jsonDir != null) {
        // One row per operation; QUANTILES+1 columns cover 0%..100% inclusive.
        String[][] quantileTime = new String[FreonOps.values().length][QUANTILES + 1];
        String[] deviations = new String[FreonOps.values().length];
        String[] means = new String[FreonOps.values().length];
        for (FreonOps ops : FreonOps.values()) {
            Snapshot snapshot = histograms.get(ops.ordinal()).getSnapshot();
            for (int i = 0; i <= QUANTILES; i++) {
                quantileTime[ops.ordinal()][i] = DurationFormatUtils.formatDuration(TimeUnit.NANOSECONDS.toMillis((long) snapshot.getValue((1.0 / QUANTILES) * i)), DURATION_FORMAT);
            }
            deviations[ops.ordinal()] = DurationFormatUtils.formatDuration(TimeUnit.NANOSECONDS.toMillis((long) snapshot.getStdDev()), DURATION_FORMAT);
            means[ops.ordinal()] = DurationFormatUtils.formatDuration(TimeUnit.NANOSECONDS.toMillis((long) snapshot.getMean()), DURATION_FORMAT);
        }
        FreonJobInfo jobInfo = new FreonJobInfo().setExecTime(execTime).setGitBaseRevision(VersionInfo.getRevision()).setMeanVolumeCreateTime(means[FreonOps.VOLUME_CREATE.ordinal()]).setDeviationVolumeCreateTime(deviations[FreonOps.VOLUME_CREATE.ordinal()]).setTenQuantileVolumeCreateTime(quantileTime[FreonOps.VOLUME_CREATE.ordinal()]).setMeanBucketCreateTime(means[FreonOps.BUCKET_CREATE.ordinal()]).setDeviationBucketCreateTime(deviations[FreonOps.BUCKET_CREATE.ordinal()]).setTenQuantileBucketCreateTime(quantileTime[FreonOps.BUCKET_CREATE.ordinal()]).setMeanKeyCreateTime(means[FreonOps.KEY_CREATE.ordinal()]).setDeviationKeyCreateTime(deviations[FreonOps.KEY_CREATE.ordinal()]).setTenQuantileKeyCreateTime(quantileTime[FreonOps.KEY_CREATE.ordinal()]).setMeanKeyWriteTime(means[FreonOps.KEY_WRITE.ordinal()]).setDeviationKeyWriteTime(deviations[FreonOps.KEY_WRITE.ordinal()]).setTenQuantileKeyWriteTime(quantileTime[FreonOps.KEY_WRITE.ordinal()]);
        String jsonName = new SimpleDateFormat("yyyyMMddHHmmss").format(Time.now()) + ".json";
        String jsonPath = jsonDir + "/" + jsonName;
        // Write the job info as pretty-printed JSON; field visibility is forced
        // so private fields serialize without getters.
        try (FileOutputStream os = new FileOutputStream(jsonPath)) {
            ObjectMapper mapper = new ObjectMapper();
            mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
            ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
            writer.writeValue(os, jobInfo);
        } catch (FileNotFoundException e) {
            out.println("Json File could not be created for the path: " + jsonPath);
            out.println(e);
        } catch (IOException e) {
            out.println("Json object could not be created");
            out.println(e);
        }
    }
}

15 Source : NamespacePanel.java
with Apache License 2.0
from wavefrontHQ

public void renderNodes(Node root, double factor, Collection<Node> nodes, boolean takeSnapshot) {
    synchronized (table) {
        // Remember the label of the currently selected row (if any) so the
        // selection can be restored after the table is rebuilt.
        @Nullable
        String selectedLabel = null;
        int selectedIndex = table.getSelectedRow();
        if (selectedIndex >= 0 && table.getTableModel().getRowCount() > 0) {
            selectedLabel = table.getTableModel().getRow(selectedIndex).get(0);
        }
        // Clear out all existing rows before re-populating.
        while (table.getTableModel().getRowCount() > 0) {
            table.getTableModel().removeRow(0);
        }
        labelToNodeMap.clear();
        // Render the root first, then the remaining nodes, sharing one lag snapshot.
        Snapshot snapshot = root.getLag().getSnapshot();
        addFirstRow(root, factor, nodes, snapshot, takeSnapshot);
        addNodes(root, factor, nodes, snapshot, selectedLabel, takeSnapshot);
    }
}

15 Source : LockFreeExponentiallyDecayingReservoirTest.java
with Apache License 2.0
from palantir

@Test
public void aReservoirOf100OutOf10Elements() {
    // Reservoir capacity (100) exceeds the number of offered values (10),
    // so the snapshot must retain every value.
    Reservoir reservoir = LockFreeExponentiallyDecayingReservoir.builder().size(100).alpha(0.99).rescaleThreshold(Duration.ofHours(1)).build();
    for (int i = 0; i < 10; i++) {
        reservoir.update(i);
    }
    Snapshot snapshot = reservoir.getSnapshot();
    assertThat(snapshot.size()).isEqualTo(10);
    assertAllValuesBetween(reservoir, 0, 10);
}

15 Source : LockFreeExponentiallyDecayingReservoirTest.java
with Apache License 2.0
from palantir

@Test
public void longPeriodsOfInactivity_fetchShouldResample() {
    ManualClock clock = new ManualClock();
    Reservoir reservoir = LockFreeExponentiallyDecayingReservoir.builder().size(10).alpha(.015).clock(clock).build();
    // add 1000 values at a rate of 10 values/second
    for (int i = 0; i < 1000; i++) {
        reservoir.update(1000 + i);
        clock.addMillis(100);
    }
    assertThat(reservoir.getSnapshot().size()).isEqualTo(10);
    assertAllValuesBetween(reservoir, 1000, 2000);
    // wait for 20 hours and take snapshot.
    // this should trigger a rescale. Note that the number of samples will be reduced to 0
    // because a scaling factor equal to zero will remove all existing entries after rescale.
    clock.addHours(20);
    Snapshot snapshot = reservoir.getSnapshot();
    assertThat(snapshot.getMax()).isEqualTo(0);
    assertThat(snapshot.getMean()).isEqualTo(0);
    assertThat(snapshot.getMedian()).isEqualTo(0);
    assertThat(snapshot.size()).isEqualTo(0);
}

15 Source : LockFreeExponentiallyDecayingReservoirTest.java
with Apache License 2.0
from palantir

// Verifies that a short elapsed period (relative to the rescale threshold)
// does not trigger a rescale, regardless of the clock's starting offset.
private static void testShortPeriodShouldNotRescale(long startTimeNanos) {
    ManualClock clock = new ManualClock(startTimeNanos);
    Reservoir reservoir = LockFreeExponentiallyDecayingReservoir.builder().size(10).alpha(1).clock(clock).build();
    reservoir.update(1000);
    assertThat(reservoir.getSnapshot().size()).isEqualTo(1);
    assertAllValuesBetween(reservoir, 1000, 1001);
    // wait for 20 minutes and take snapshot.
    // this should not trigger a rescale, so the single recorded value survives.
    clock.addSeconds(20 * 60);
    Snapshot snapshot = reservoir.getSnapshot();
    assertThat(snapshot.getMax()).isEqualTo(1000);
    assertThat(snapshot.getMean()).isEqualTo(1000);
    assertThat(snapshot.getMedian()).isEqualTo(1000);
    assertThat(snapshot.size()).isEqualTo(1);
}

15 Source : LockFreeExponentiallyDecayingReservoirTest.java
with Apache License 2.0
from palantir

@Test
public void emptyReservoirSnapshot_shouldReturnZeroForAllValues() {
    // An empty reservoir must report zero for all aggregate statistics.
    Reservoir reservoir = LockFreeExponentiallyDecayingReservoir.builder().size(100).alpha(0.015).build();
    Snapshot snapshot = reservoir.getSnapshot();
    assertThat(snapshot.getMax()).isEqualTo(0);
    assertThat(snapshot.getMean()).isEqualTo(0);
    assertThat(snapshot.getMedian()).isEqualTo(0);
    assertThat(snapshot.size()).isEqualTo(0);
}

15 Source : LockFreeExponentiallyDecayingReservoirTest.java
with Apache License 2.0
from palantir

@Test
public void aHeavilyBiasedReservoirOf100OutOf1000Elements() {
    // Capacity 1000 with only 100 offered values: all values are retained.
    Reservoir reservoir = LockFreeExponentiallyDecayingReservoir.builder().size(1000).alpha(0.01).build();
    for (int i = 0; i < 100; i++) {
        reservoir.update(i);
    }
    assertThat(reservoir.size()).isEqualTo(100);
    Snapshot snapshot = reservoir.getSnapshot();
    assertThat(snapshot.size()).isEqualTo(100);
    assertAllValuesBetween(reservoir, 0, 100);
}

15 Source : LevelDBBenchmark.java
with Apache License 2.0
from lsds

/**
 * Prints a plain-text summary table of all recorded timers (count, mean, min,
 * max, 95th percentile in milliseconds) to stdout, then emits a full report
 * through an SLF4J reporter. Runs once after all benchmark methods finish.
 */
@AfterClass
public static void report() {
    if (metrics.getTimers().isEmpty()) {
        return;
    }
    // Column width for the name column: longest timer name plus padding.
    int headingPrefix = 0;
    for (Map.Entry<String, Timer> e : metrics.getTimers().entrySet()) {
        headingPrefix = Math.max(e.getKey().length(), headingPrefix);
    }
    headingPrefix += 4;
    StringBuilder heading = new StringBuilder();
    for (int i = 0; i < headingPrefix; i++) {
        heading.append(" ");
    }
    heading.append("\tcount");
    heading.append("\tmean");
    heading.append("\tmin");
    heading.append("\tmax");
    heading.append("\t95th");
    System.out.println(heading);
    for (Map.Entry<String, Timer> e : metrics.getTimers().entrySet()) {
        StringBuilder row = new StringBuilder();
        row.append(e.getKey());
        // Pad the name so the value columns line up with the heading.
        for (int i = 0; i < headingPrefix - e.getKey().length(); i++) {
            row.append(" ");
        }
        Snapshot s = e.getValue().getSnapshot();
        row.append("\t").append(e.getValue().getCount());
        row.append("\t").append(toMs(s.getMean()));
        row.append("\t").append(toMs(s.getMin()));
        row.append("\t").append(toMs(s.getMax()));
        row.append("\t").append(toMs(s.get95thPercentile()));
        System.out.println(row);
    }
    Slf4jReporter.forRegistry(metrics).outputTo(LoggerFactory.getLogger(LevelDBBenchmark.class)).build().report();
}

15 Source : DropwizardMetricsExporter.java
with Apache License 2.0
from dhatim

/**
 * Emits a snapshot as Prometheus samples: one sample per quantile (scaled by
 * {@code factor}) plus unscaled min/max/median/mean/stddev and the count.
 */
private void writeSnapshotAndCount(String dropwizardName, Snapshot snapshot, long count, double factor, MetricType type, String helpMessage) throws IOException {
    String name = sanitizeMetricName(dropwizardName);
    writer.writeHelp(name, helpMessage);
    writer.writeType(name, type);
    // Quantile samples are scaled by the caller-supplied factor.
    String[] quantileLabels = { "0.5", "0.75", "0.95", "0.98", "0.99", "0.999" };
    double[] quantileValues = { snapshot.getMedian(), snapshot.get75thPercentile(), snapshot.get95thPercentile(), snapshot.get98thPercentile(), snapshot.get99thPercentile(), snapshot.get999thPercentile() };
    for (int i = 0; i < quantileLabels.length; i++) {
        writer.writeSample(name, mapOf("quantile", quantileLabels[i]), quantileValues[i] * factor);
    }
    // Aggregate statistics are reported unscaled.
    writer.writeSample(name + "_min", emptyMap(), snapshot.getMin());
    writer.writeSample(name + "_max", emptyMap(), snapshot.getMax());
    writer.writeSample(name + "_median", emptyMap(), snapshot.getMedian());
    writer.writeSample(name + "_mean", emptyMap(), snapshot.getMean());
    writer.writeSample(name + "_stddev", emptyMap(), snapshot.getStdDev());
    writer.writeSample(name + "_count", emptyMap(), count);
}

15 Source : AbstractReportingExecutionListenerTest.java
with Apache License 2.0
from datastax

abstract clreplaced AbstractReportingExecutionListenerTest {

    final LogInterceptor interceptor;

    @Mock
    MetricsCollectingExecutionListener delegate;

    @Mock
    Timer total;

    @Mock
    Counter successful;

    @Mock
    Counter failed;

    @Mock
    Counter inFlight;

    @Mock
    Meter bytesSent;

    @Mock
    Meter bytesReceived;

    @Mock
    Snapshot latencies;

    AbstractReportingExecutionListenerTest(LogInterceptor interceptor) {
        this.interceptor = interceptor;
    }

    @BeforeEach
    void setUpMetrics() {
        when(delegate.getRegistry()).thenReturn(new MetricRegistry());
        when(delegate.getInFlightRequestsCounter()).thenReturn(inFlight);
        when(total.getCount()).thenReturn(100_000L);
        when(total.getMeanRate()).thenReturn(1_000d);
        when(successful.getCount()).thenReturn(99_999L);
        when(failed.getCount()).thenReturn(1L);
        when(inFlight.getCount()).thenReturn(500L);
        when(total.getSnapshot()).thenReturn(latencies);
        when(latencies.getMean()).thenReturn((double) MILLISECONDS.toNanos(50));
        when(latencies.get99thPercentile()).thenReturn((double) MILLISECONDS.toNanos(100));
        when(latencies.get999thPercentile()).thenReturn((double) MILLISECONDS.toNanos(250));
    }
}

15 Source : CloudWatchReporter.java
with MIT License
from azagniotov

/**
 * Stages a {@link StatisticSet} datum (sum/count/min/max, all duration-converted)
 * for the given snapshot, but only when the metric is configured for reporting.
 */
private void stageMetricDatumWithConvertedSnapshot(final boolean metricConfigured, final String metricName, final Snapshot snapshot, final StandardUnit standardUnit, final List<MetricDatum> metricData) {
    if (!metricConfigured) {
        return;
    }
    final DimensionedName dimensionedName = DimensionedName.decode(metricName);
    // Sum is reconstructed from the raw values, then converted like the other stats.
    final double scaledSum = convertDuration(LongStream.of(snapshot.getValues()).sum());
    final StatisticSet statisticSet = StatisticSet.builder()
            .sum(scaledSum)
            .sampleCount((double) snapshot.size())
            .minimum(convertDuration(snapshot.getMin()))
            .maximum(convertDuration(snapshot.getMax()))
            .build();
    // Global dimensions first, then the summary-type marker, then per-metric dimensions.
    final Set<Dimension> dimensions = new LinkedHashSet<>(builder.globalDimensions);
    dimensions.add(Dimension.builder().name(DIMENSION_NAME_TYPE).value(DIMENSION_SNAPSHOT_SUMMARY).build());
    dimensions.addAll(dimensionedName.getDimensions());
    metricData.add(MetricDatum.builder()
            .timestamp(Instant.ofEpochMilli(builder.clock.getTime()))
            .metricName(dimensionedName.getName())
            .dimensions(dimensions)
            .statisticValues(statisticSet)
            .storageResolution(highResolution ? HIGH_RESOLUTION : STANDARD_RESOLUTION)
            .unit(standardUnit)
            .build());
}

15 Source : CloudWatchReporter.java
with MIT License
from azagniotov

/**
 * Reports a {@link Timer}'s snapshot as CloudWatch data, converting durations
 * with the configured duration unit.
 * <p>
 * Non-empty snapshots always produce data; empty snapshots only do so when
 * {@link Builder#withZeroValuesSubmission()} is enabled (submitting empty
 * values otherwise risks InvalidParameterValueException and wasted cost).
 *
 * @see Timer#getSnapshot
 * @see #getDurationUnit
 * @see #convertDuration(double)
 */
private void processTimer(final String metricName, final Timer timer, final List<MetricDatum> metricData) {
    final Snapshot snapshot = timer.getSnapshot();
    final boolean hasData = snapshot.size() > 0;
    // Percentile values, one datum per configured percentile.
    if (builder.withZeroValuesSubmission || hasData) {
        for (final Percentile percentile : builder.percentiles) {
            final double converted = convertDuration(snapshot.getValue(percentile.getQuantile()));
            stageMetricDatum(true, metricName, converted, durationUnit, percentile.getDesc(), metricData);
        }
    }
    // Mean / stddev / statistic-set are never staged from an empty snapshot,
    // regardless of the zero-values-submission setting.
    if (hasData) {
        final String formattedDuration = String.format(" [in-%s]", getDurationUnit());
        stageMetricDatum(builder.withArithmeticMean, metricName, convertDuration(snapshot.getMean()), durationUnit, DIMENSION_SNAPSHOT_MEAN + formattedDuration, metricData);
        stageMetricDatum(builder.withStdDev, metricName, convertDuration(snapshot.getStdDev()), durationUnit, DIMENSION_SNAPSHOT_STD_DEV + formattedDuration, metricData);
        stageMetricDatumWithConvertedSnapshot(builder.withStatisticSet, metricName, snapshot, durationUnit, metricData);
    }
}

15 Source : MetricUtils.java
with Apache License 2.0
from apache

/**
 * Adds metrics from a Timer to a NamedList, using well-known back-compat names.
 * Rates are reported as-is; latency statistics are converted from nanoseconds
 * to milliseconds.
 * @param lst The NamedList to add the metrics data to
 * @param timer The Timer to extract the metrics from
 */
public static void addMetrics(NamedList<Object> lst, Timer timer) {
    Snapshot snapshot = timer.getSnapshot();
    lst.add("avgRequestsPerSecond", timer.getMeanRate());
    lst.add("5minRateRequestsPerSecond", timer.getFiveMinuteRate());
    lst.add("15minRateRequestsPerSecond", timer.getFifteenMinuteRate());
    // Latency stats, each converted ns -> ms under its legacy key.
    String[] keys = { "avgTimePerRequest", "medianRequestTime", "75thPcRequestTime", "95thPcRequestTime", "99thPcRequestTime", "999thPcRequestTime" };
    double[] nanos = { snapshot.getMean(), snapshot.getMedian(), snapshot.get75thPercentile(), snapshot.get95thPercentile(), snapshot.get99thPercentile(), snapshot.get999thPercentile() };
    for (int i = 0; i < keys.length; i++) {
        lst.add(keys[i], nsToMs(nanos[i]));
    }
}

14 Source : LockFreeExponentiallyDecayingReservoirTest.java
with Apache License 2.0
from palantir

@Test
public void aReservoirOf100OutOf1000Elements() {
    // 1000 values offered into a size-100 reservoir: exactly 100 are retained.
    Reservoir reservoir = LockFreeExponentiallyDecayingReservoir.builder().size(100).alpha(0.99).rescaleThreshold(Duration.ofHours(1)).build();
    for (int i = 0; i < 1000; i++) {
        reservoir.update(i);
    }
    assertThat(reservoir.size()).isEqualTo(100);
    Snapshot snapshot = reservoir.getSnapshot();
    assertThat(snapshot.size()).isEqualTo(100);
    assertAllValuesBetween(reservoir, 0, 1000);
}

14 Source : MetricsFormatter.java
with Apache License 2.0
from NationalSecurityAgency

/** Formats a timer's min/max/mean (duration-converted) and count as a one-line STAT string. */
public String formatTimer(final String name, final Timer timer) {
    final Snapshot snap = timer.getSnapshot();
    final double min = convertDuration(snap.getMin());
    final double max = convertDuration(snap.getMax());
    final double avg = convertDuration(snap.getMean());
    return String.format("STAT: %s => min=%2.2f,  max=%2.2f, avg=%2.2f, events=%d", name, min, max, avg, timer.getCount());
}

14 Source : HadoopMetrics2Reporter.java
with Apache License 2.0
from Kyligence

/**
 * Add Dropwizard-Metrics value-distribution data to a Hadoop-Metrics2 record builder,
 * converting the durations to the appropriate unit.
 *
 * @param builder A Hadoop-Metrics2 record builder.
 * @param name A base name for this record.
 * @param desc A description for this record.
 * @param snapshot The distribution of measured values.
 */
private void addSnapshot(MetricsRecordBuilder builder, String name, String desc, Snapshot snapshot) {
    // Gauge suffixes and their corresponding snapshot statistics, emitted in order.
    String[] suffixes = { "_mean", "_min", "_max", "_median", "_stddev", "_75thpercentile", "_95thpercentile", "_98thpercentile", "_99thpercentile", "_999thpercentile" };
    double[] stats = { snapshot.getMean(), snapshot.getMin(), snapshot.getMax(), snapshot.getMedian(), snapshot.getStdDev(), snapshot.get75thPercentile(), snapshot.get95thPercentile(), snapshot.get98thPercentile(), snapshot.get99thPercentile(), snapshot.get999thPercentile() };
    for (int i = 0; i < suffixes.length; i++) {
        builder.addGauge(Interns.info(name + suffixes[i], desc), convertDuration(stats[i]));
    }
}

14 Source : UnixSocketClient.java
with Apache License 2.0
from datastax

/**
 * Converts our latency metrics into prometheus-compatible ones
 * https://www.robustperception.io/why-are-prometheus-histograms-cumulative
 * https://prometheus.io/docs/practices/histograms
 *
 * @return buckets as tags for Insights use.
 */
Map<String, String> reportPrometheusTimer(String name, String tags, long count, Snapshot snapshot) {
    int n = runtimeConfig.metric_sampling_interval_in_seconds;
    StringBuilder msg = new StringBuilder(512).append(ip).append("/mcac-").append(name).append("/").append("micros interval=").append(n).append(" ").append(tags).append(" N:").append(count).append(":").append(// calculate the sum from the avg
    snapshot.getMean() * count);
    Map<String, String> bucketTags = Maps.newHashMapWithExpectedSize(globalTags.size() + latencyBuckets.length);
    bucketTags.putAll(globalTags);
    long[] buckets = inputBuckets;
    long[] values = snapshot.getValues();
    // Cassandra's estimated-histogram snapshots carry their own bucket offsets;
    // detect them by class name since the types are not on our compile path.
    String snapshotClass = snapshot.getClass().getName();
    if (snapshotClass.contains("EstimatedHistogramReservoirSnapshot")) {
        buckets = decayingBuckets;
    } else if (snapshotClass.contains("DecayingEstimatedHistogram")) {
        try {
            // Cache the reflective accessor; it is looked up at most once.
            Method m = decayingHistogramOffsetMethod;
            if (m == null) {
                m = snapshot.getClass().getMethod("getOffsets");
                decayingHistogramOffsetMethod = m;
            }
            buckets = (long[]) m.invoke(snapshot);
        } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
        // nothing we can do
        }
    }
    // This can happen if histogram isn't EstimatedDecay or EstimatedHistogram
    if (values.length != buckets.length) {
        NoSpamLogger.getLogger(logger, 1, TimeUnit.HOURS).info("Not able to get buckets for {} {} type {}", name, values.length, snapshot.getClass().getName());
        return bucketTags;
    }
    // Fold the raw histogram into our cumulative latency buckets.
    int outputIndex = 0;
    long cumulativeCount = 0;
    for (int i = 0; i < values.length; i++) {
        // Hit bucket edge
        if (outputIndex < latencyBuckets.length && buckets[i] > latencyBuckets[outputIndex].left) {
            String value = Long.toString(cumulativeCount);
            msg.append(":").append(value);
            bucketTags.put(latencyBuckets[outputIndex++].right, value);
        }
        cumulativeCount += values[i];
    }
    String total = Long.toString(cumulativeCount);
    // Add any missing buckets + inf bucket
    while (outputIndex++ <= latencyBuckets.length) {
        msg.append(":").append(total);
        bucketTags.put(INF_BUCKET, total);
    }
    reportInternalWithoutFlush("PUTVAL", msg.toString());
    return bucketTags;
}

14 Source : LoadWorkflow.java
with Apache License 2.0
from datastax

/**
 * Estimates the mean serialized row size in bytes by sampling up to 1000
 * bound statements from the connector. Falls back to 1 KB when the sample
 * is too small, too spread out (coefficient of variation >= 1), or sampling
 * fails for any reason.
 */
private double getMeanRowSize() {
    double meanSize;
    try {
        LOGGER.debug("Sampling data...");
        Histogram sample = DataSizeSampler.sampleWrites(session.getContext(), Flux.merge(connector.read()).<Statement<?>>map(mapper).filter(BoundStatement.class::isInstance).take(1000).toIterable());
        if (sample.getCount() < 1000) {
            // sample too small, go with a common value
            LOGGER.debug("Data sample is too small: {}, discarding", sample.getCount());
            meanSize = _1_KB;
        } else {
            Snapshot snapshot = sample.getSnapshot();
            meanSize = snapshot.getMean();
            double standardDeviation = snapshot.getStdDev();
            double coefficientOfVariation = standardDeviation / meanSize;
            LOGGER.debug("Average record size in bytes: {}, std dev: {}, coefficientOfVariation: {}", meanSize, standardDeviation, coefficientOfVariation);
            if (coefficientOfVariation >= 1) {
                // High variance makes the mean unreliable as a sizing heuristic.
                LOGGER.debug("Data sample is too spread out, discarding");
                meanSize = _1_KB;
            }
        }
    } catch (Exception e) {
        // Best-effort estimate: sampling failures must not abort the load.
        LOGGER.debug("Sampling failed: {}", ThrowableUtils.getSanitizedErrorMessage(e));
        meanSize = _1_KB;
    }
    return meanSize;
}

14 Source : WritesReportingExecutionListener.java
with Apache License 2.0
from datastax

/**
 * Emits a progress report to the sink: write counts (with completion % when
 * the expected total is known), throughput (optionally with byte rates), and
 * latency percentiles from the timer's snapshot.
 */
@Override
public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters, SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers) {
    if (!sink.isEnabled()) {
        return;
    }
    Snapshot snapshot = timer.getSnapshot();
    long total = timer.getCount();
    String durationUnit = getDurationUnit();
    String rateUnit = getRateUnit();
    // A negative expected total means "unknown": omit the completion percentage.
    if (expectedTotal < 0) {
        sink.accept(String.format(countMessage, total, successful.getCount(), failed.getCount(), inFlight.getCount()));
    } else {
        float achieved = (float) total / (float) expectedTotal * 100f;
        sink.accept(String.format(countMessage, total, successful.getCount(), failed.getCount(), inFlight.getCount(), achieved));
    }
    double throughput = timer.getMeanRate();
    if (sent != null) {
        double sizeSent = sent.getMeanRate();
        // Guard against division by zero when nothing has been recorded yet.
        sink.accept(String.format(throughputMessage, convertRate(throughput), rateUnit, convertRate(sizeSent / BYTES_PER_MB), rateUnit, throughput == 0 ? 0 : (sizeSent / BYTES_PER_KB) / throughput));
    } else {
        sink.accept(String.format(throughputMessage, convertRate(throughput), rateUnit));
    }
    sink.accept(String.format(latencyMessage, convertDuration(snapshot.getMean()), convertDuration(snapshot.get75thPercentile()), convertDuration(snapshot.get99thPercentile()), convertDuration(snapshot.get999thPercentile()), durationUnit));
}

14 Source : StatementsReportingExecutionListener.java
with Apache License 2.0
from datastax

/**
 * Emits a progress report to the sink: statement counts (with completion %
 * when the expected total is known), throughput — including sent/received
 * byte rates when both meters are available — and latency percentiles from
 * the timer's snapshot.
 */
@Override
public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters, SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers) {
    if (!sink.isEnabled()) {
        return;
    }
    Snapshot snapshot = timer.getSnapshot();
    long total = timer.getCount();
    String durationUnit = getDurationUnit();
    String rateUnit = getRateUnit();
    // A negative expected total means "unknown": omit the completion percentage.
    if (expectedTotal < 0) {
        sink.accept(String.format(countMessage, total, successful.getCount(), failed.getCount(), inFlight.getCount()));
    } else {
        float achieved = (float) total / (float) expectedTotal * 100f;
        sink.accept(String.format(countMessage, total, successful.getCount(), failed.getCount(), inFlight.getCount(), achieved));
    }
    double throughput = timer.getMeanRate();
    // Byte-rate details require both directions to be metered.
    if (sent != null && received != null) {
        double sizeSent = sent.getMeanRate();
        double sizeReceived = received.getMeanRate();
        // Guard against division by zero when nothing has been recorded yet.
        sink.accept(String.format(throughputMessage, convertRate(throughput), rateUnit, convertRate(sizeSent / BYTES_PER_MB), rateUnit, convertRate(sizeReceived / BYTES_PER_MB), rateUnit, throughput == 0 ? 0 : (sizeSent / BYTES_PER_KB) / throughput, throughput == 0 ? 0 : (sizeReceived / BYTES_PER_KB) / throughput));
    } else {
        sink.accept(String.format(throughputMessage, convertRate(throughput), rateUnit));
    }
    sink.accept(String.format(latencyMessage, convertDuration(snapshot.getMean()), convertDuration(snapshot.get75thPercentile()), convertDuration(snapshot.get99thPercentile()), convertDuration(snapshot.get999thPercentile()), durationUnit));
}

14 Source : ReadsReportingExecutionListener.java
with Apache License 2.0
from datastax

/**
 * Emits a progress report to the sink: read counts (with completion % when
 * the expected total is known), throughput (optionally with received byte
 * rates), and latency percentiles from the timer's snapshot.
 */
@Override
public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters, SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers) {
    if (!sink.isEnabled()) {
        return;
    }
    Snapshot snapshot = timer.getSnapshot();
    long total = timer.getCount();
    String durationUnit = getDurationUnit();
    String rateUnit = getRateUnit();
    // A negative expected total means "unknown": omit the completion percentage.
    if (expectedTotal < 0) {
        sink.accept(String.format(countMessage, total, successful.getCount(), failed.getCount(), inFlight.getCount()));
    } else {
        float achieved = (float) total / (float) expectedTotal * 100f;
        sink.accept(String.format(countMessage, total, successful.getCount(), failed.getCount(), inFlight.getCount(), achieved));
    }
    double throughput = timer.getMeanRate();
    if (received != null) {
        double sizeReceived = received.getMeanRate();
        // Guard against division by zero when nothing has been recorded yet.
        sink.accept(String.format(throughputMessage, convertRate(throughput), rateUnit, convertRate(sizeReceived / BYTES_PER_MB), rateUnit, throughput == 0 ? 0 : (sizeReceived / BYTES_PER_KB) / throughput));
    } else {
        sink.accept(String.format(throughputMessage, convertRate(throughput), rateUnit));
    }
    sink.accept(String.format(latencyMessage, convertDuration(snapshot.getMean()), convertDuration(snapshot.get75thPercentile()), convertDuration(snapshot.get99thPercentile()), convertDuration(snapshot.get999thPercentile()), durationUnit));
}

See More Examples