From 2df8a7358ab53a5a133fa1fadfafe4a911ee1603 Mon Sep 17 00:00:00 2001 From: <> Date: Sat, 3 Aug 2024 02:41:54 +0000 Subject: [PATCH] Deployed 3eddfd4 with MkDocs version: 1.6.0 --- 404.html | 53 +- api/fetch/index.html | 53 +- api/graph/anonymization/index.html | 53 +- api/graph/axis-bounds/index.html | 53 +- api/graph/axis-scale/index.html | 53 +- api/graph/basics/index.html | 53 +- api/graph/color-palettes/index.html | 53 +- api/graph/examples/index.html | 53 +- api/graph/graph/index.html | 53 +- api/graph/heatmap/index.html | 53 +- api/graph/layout/index.html | 53 +- api/graph/legends/index.html | 53 +- api/graph/line-attributes/index.html | 53 +- api/graph/line-styles/index.html | 53 +- api/graph/multi-y/index.html | 53 +- api/graph/outputs/index.html | 53 +- api/graph/tick/index.html | 53 +- api/graph/time-shift/index.html | 53 +- api/graph/time-zone/index.html | 53 +- api/graph/vision/index.html | 53 +- api/tags/index.html | 53 +- api/time-parameters/index.html | 53 +- asl/alerting-expressions/index.html | 53 +- asl/alerting-philosophy/index.html | 53 +- asl/des/index.html | 53 +- asl/index.html | 53 +- asl/ref/-rot/index.html | 53 +- asl/ref/2over/index.html | 53 +- asl/ref/abs/index.html | 53 +- asl/ref/add/index.html | 53 +- asl/ref/all/index.html | 53 +- asl/ref/alpha/index.html | 53 +- asl/ref/and/index.html | 53 +- asl/ref/area/index.html | 53 +- asl/ref/as/index.html | 53 +- asl/ref/avg/index.html | 53 +- asl/ref/axis/index.html | 53 +- asl/ref/bottomk-others-avg/index.html | 53 +- asl/ref/bottomk-others-max/index.html | 53 +- asl/ref/bottomk-others-min/index.html | 53 +- asl/ref/bottomk-others-sum/index.html | 53 +- asl/ref/bottomk/index.html | 53 +- asl/ref/by/index.html | 53 +- asl/ref/call/index.html | 53 +- asl/ref/cf-avg/index.html | 53 +- asl/ref/cf-max/index.html | 53 +- asl/ref/cf-min/index.html | 53 +- asl/ref/cf-sum/index.html | 53 +- asl/ref/cg/index.html | 53 +- asl/ref/clamp-max/index.html | 53 +- asl/ref/clamp-min/index.html | 53 +- 
asl/ref/clear/index.html | 53 +- asl/ref/color/index.html | 53 +- asl/ref/const/index.html | 53 +- asl/ref/contains/index.html | 53 +- asl/ref/count/index.html | 53 +- asl/ref/cq/index.html | 53 +- asl/ref/decode/index.html | 53 +- asl/ref/delay/index.html | 53 +- asl/ref/depth/index.html | 53 +- asl/ref/derivative/index.html | 53 +- asl/ref/des-epic-signal/index.html | 53 +- asl/ref/des-epic-viz/index.html | 53 +- asl/ref/des-fast/index.html | 53 +- asl/ref/des-simple/index.html | 53 +- asl/ref/des-slow/index.html | 53 +- asl/ref/des-slower/index.html | 53 +- asl/ref/des/index.html | 53 +- asl/ref/dist-avg/index.html | 53 +- asl/ref/dist-max/index.html | 53 +- asl/ref/dist-stddev/index.html | 53 +- asl/ref/div/index.html | 53 +- asl/ref/drop/index.html | 53 +- asl/ref/dup/index.html | 53 +- asl/ref/each/index.html | 53 +- asl/ref/ends/index.html | 53 +- asl/ref/eq/index.html | 53 +- asl/ref/eureka-avg/index.html | 53 +- asl/ref/fadd/index.html | 53 +- asl/ref/false/index.html | 53 +- asl/ref/fcall/index.html | 53 +- asl/ref/fdiv/index.html | 53 +- asl/ref/filter/index.html | 53 +- asl/ref/fmul/index.html | 53 +- asl/ref/format/index.html | 53 +- asl/ref/freeze/index.html | 53 +- asl/ref/fsub/index.html | 53 +- asl/ref/ge/index.html | 53 +- asl/ref/get/index.html | 53 +- asl/ref/gt/index.html | 53 +- asl/ref/has/index.html | 53 +- asl/ref/head/index.html | 53 +- asl/ref/heatmap/index.html | 53 +- asl/ref/in/index.html | 53 +- asl/ref/integral/index.html | 53 +- asl/ref/le/index.html | 53 +- asl/ref/legend/index.html | 53 +- asl/ref/limit/index.html | 53 +- asl/ref/line/index.html | 53 +- asl/ref/list/index.html | 53 +- asl/ref/ls/index.html | 53 +- asl/ref/lt/index.html | 53 +- asl/ref/lw/index.html | 53 +- asl/ref/map/index.html | 53 +- asl/ref/max/index.html | 53 +- asl/ref/median/index.html | 53 +- asl/ref/min/index.html | 53 +- asl/ref/mul/index.html | 53 +- asl/ref/named-rewrite/index.html | 53 +- asl/ref/ndrop/index.html | 53 +- asl/ref/neg/index.html | 53 +- 
asl/ref/nip/index.html | 53 +- asl/ref/nlist/index.html | 53 +- asl/ref/node-avg/index.html | 53 +- asl/ref/not/index.html | 53 +- asl/ref/offset/index.html | 53 +- asl/ref/or/index.html | 53 +- asl/ref/order/index.html | 53 +- asl/ref/over/index.html | 53 +- asl/ref/palette/index.html | 53 +- asl/ref/pct/index.html | 53 +- asl/ref/per-step/index.html | 53 +- asl/ref/percentiles-heatmap/index.html | 53 +- asl/ref/percentiles/index.html | 53 +- asl/ref/pick/index.html | 53 +- asl/ref/pow/index.html | 53 +- asl/ref/random/index.html | 53 +- asl/ref/re/index.html | 53 +- asl/ref/reic/index.html | 53 +- asl/ref/roll/index.html | 53 +- asl/ref/rolling-count/index.html | 53 +- asl/ref/rolling-max/index.html | 53 +- asl/ref/rolling-mean/index.html | 53 +- asl/ref/rolling-min/index.html | 53 +- asl/ref/rolling-sum/index.html | 53 +- asl/ref/rot/index.html | 53 +- asl/ref/s/index.html | 53 +- asl/ref/sdes-fast/index.html | 53 +- asl/ref/sdes-simple/index.html | 53 +- asl/ref/sdes-slow/index.html | 53 +- asl/ref/sdes-slower/index.html | 53 +- asl/ref/sdes/index.html | 53 +- asl/ref/set/index.html | 53 +- asl/ref/sort/index.html | 53 +- asl/ref/sqrt/index.html | 53 +- asl/ref/srandom/index.html | 53 +- asl/ref/sset/index.html | 53 +- asl/ref/stack/index.html | 53 +- asl/ref/starts/index.html | 53 +- asl/ref/stat-avg-mf/index.html | 53 +- asl/ref/stat-avg/index.html | 53 +- asl/ref/stat-count/index.html | 53 +- asl/ref/stat-last/index.html | 53 +- asl/ref/stat-max-mf/index.html | 53 +- asl/ref/stat-max/index.html | 53 +- asl/ref/stat-min-mf/index.html | 53 +- asl/ref/stat-min/index.html | 53 +- asl/ref/stat-total/index.html | 53 +- asl/ref/stat/index.html | 53 +- asl/ref/stddev/index.html | 53 +- asl/ref/sub/index.html | 53 +- asl/ref/sum/index.html | 53 +- asl/ref/swap/index.html | 53 +- asl/ref/time-span/index.html | 53 +- asl/ref/time/index.html | 53 +- asl/ref/topk-others-avg/index.html | 53 +- asl/ref/topk-others-max/index.html | 53 +- asl/ref/topk-others-min/index.html | 
53 +- asl/ref/topk-others-sum/index.html | 53 +- asl/ref/topk/index.html | 53 +- asl/ref/trend/index.html | 53 +- asl/ref/true/index.html | 53 +- asl/ref/tuck/index.html | 53 +- asl/ref/vspan/index.html | 53 +- asl/tutorial/index.html | 53 +- concepts/consolidation/index.html | 53 +- concepts/naming/index.html | 53 +- concepts/normalization/index.html | 53 +- concepts/time-series/index.html | 53 +- getting-started/index.html | 53 +- index.html | 53 +- overview/index.html | 53 +- search/search_index.json | 2 +- sitemap.xml | 505 +- sitemap.xml.gz | Bin 1603 -> 1614 bytes spectator/agent/metrics/index.html | 53 +- spectator/agent/usage/index.html | 53 +- spectator/core/clock/index.html | 53 +- spectator/core/meters/counter/index.html | 53 +- spectator/core/meters/dist-summary/index.html | 53 +- spectator/core/meters/gauge/index.html | 53 +- spectator/core/meters/timer/index.html | 53 +- spectator/index.html | 53 +- spectator/lang/cpp/usage/index.html | 53 +- spectator/lang/go/migrations/index.html | 53 +- spectator/lang/go/usage/index.html | 66 +- .../lang/java/ext/jvm-buffer-pools/index.html | 53 +- .../lang/java/ext/jvm-classloading/index.html | 53 +- .../lang/java/ext/jvm-compilation/index.html | 53 +- .../lang/java/ext/jvm-gc-causes/index.html | 53 +- spectator/lang/java/ext/jvm-gc/index.html | 53 +- .../lang/java/ext/jvm-memory-pools/index.html | 53 +- .../lang/java/ext/jvm-safepoint/index.html | 53 +- .../lang/java/ext/jvm-threads/index.html | 53 +- spectator/lang/java/ext/log4j1/index.html | 53 +- spectator/lang/java/ext/log4j2/index.html | 53 +- .../lang/java/ext/placeholders/index.html | 53 +- .../lang/java/ext/thread-pools/index.html | 53 +- spectator/lang/java/meters/counter/index.html | 53 +- .../lang/java/meters/dist-summary/index.html | 53 +- spectator/lang/java/meters/gauge/index.html | 53 +- .../java/meters/percentile-timer/index.html | 53 +- spectator/lang/java/meters/timer/index.html | 53 +- .../lang/java/registry/metrics3/index.html | 53 +- 
.../lang/java/registry/overview/index.html | 53 +- .../lang/java/servo-migration/index.html | 53 +- spectator/lang/java/testing/index.html | 53 +- spectator/lang/java/usage/index.html | 53 +- .../lang/nodejs/ext/nodejs-cpu/index.html | 53 +- .../nodejs/ext/nodejs-eventloop/index.html | 53 +- .../ext/nodejs-filedescriptor/index.html | 53 +- .../lang/nodejs/ext/nodejs-gc/index.html | 53 +- .../lang/nodejs/ext/nodejs-heap/index.html | 53 +- .../nodejs/ext/nodejs-heapspace/index.html | 53 +- .../lang/nodejs/ext/nodejs-memory/index.html | 53 +- .../lang/nodejs/meters/counter/index.html | 59 +- .../nodejs/meters/dist-summary/index.html | 59 +- spectator/lang/nodejs/meters/gauge/index.html | 59 +- .../nodejs/meters/percentile-timer/index.html | 59 +- spectator/lang/nodejs/meters/timer/index.html | 59 +- spectator/lang/nodejs/usage/index.html | 53 +- spectator/lang/overview/index.html | 53 +- spectator/lang/py/meters/age-gauge/index.html | 77 +- spectator/lang/py/meters/counter/index.html | 83 +- .../lang/py/meters/dist-summary/index.html | 67 +- spectator/lang/py/meters/gauge/index.html | 69 +- spectator/lang/py/meters/max-gauge/index.html | 69 +- .../index.html | 114 +- .../index.html | 73 +- .../index.html | 73 +- .../py/meters/percentile-timer/index.html | 7460 +++++++++++++++++ spectator/lang/py/meters/timer/index.html | 86 +- spectator/lang/py/migrations/index.html | 387 +- spectator/lang/py/usage/index.html | 415 +- spectator/lang/rb/deprecated/index.html | 53 +- .../patterns/cardinality-limiter/index.html | 53 +- spectator/patterns/gauge-poller/index.html | 53 +- .../patterns/interval-counter/index.html | 53 +- spectator/patterns/long-task-timer/index.html | 53 +- .../patterns/percentile-timer/index.html | 53 +- spectator/patterns/polled-meter/index.html | 53 +- spectator/specs/ipc/index.html | 53 +- 252 files changed, 17645 insertions(+), 4439 deletions(-) rename spectator/lang/py/meters/{pct-timer => monotonic-counter-uint}/index.html (97%) rename 
spectator/lang/py/meters/{mono-counter => monotonic-counter}/index.html (98%) rename spectator/lang/py/meters/{pct-dist-summary => percentile-dist-summary}/index.html (98%) create mode 100644 spectator/lang/py/meters/percentile-timer/index.html diff --git a/404.html b/404.html index bf454fd2..1f41109e 100644 --- a/404.html +++ b/404.html @@ -6538,7 +6538,7 @@ - Counters + Counter @@ -6559,7 +6559,7 @@ - Distribution Summaries + Distribution Summary @@ -6580,7 +6580,7 @@ - Gauges + Gauge @@ -6601,7 +6601,7 @@ - Percentile Timers + Percentile Timer @@ -6622,7 +6622,7 @@ - Timers + Timer @@ -6756,7 +6756,7 @@ - Age Gauges + Age Gauge @@ -6777,7 +6777,7 @@ - Counters + Counter @@ -6798,7 +6798,7 @@ - Distribution Summaries + Distribution Summary @@ -6840,7 +6840,7 @@ - Max Gauges + Max Gauge @@ -6857,11 +6857,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6878,11 +6878,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6899,11 +6899,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6924,7 +6945,7 @@ - Timers + Timer diff --git a/api/fetch/index.html b/api/fetch/index.html index 1fadf9a0..ba11fc52 100644 --- a/api/fetch/index.html +++ b/api/fetch/index.html @@ -6540,7 +6540,7 @@ - Counters + Counter @@ -6561,7 +6561,7 @@ - Distribution Summaries + Distribution Summary @@ -6582,7 +6582,7 @@ - Gauges + Gauge @@ -6603,7 +6603,7 @@ - Percentile Timers + Percentile Timer @@ -6624,7 +6624,7 @@ - Timers + Timer @@ -6758,7 +6758,7 @@ - Age Gauges + Age Gauge @@ -6779,7 +6779,7 @@ - Counters + Counter @@ -6800,7 +6800,7 @@ - Distribution Summaries + Distribution Summary @@ -6842,7 +6842,7 @@ - Max Gauges + Max Gauge @@ -6859,11 +6859,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6880,11 +6880,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6901,11 +6901,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6926,7 +6947,7 @@ - Timers + Timer diff --git a/api/graph/anonymization/index.html b/api/graph/anonymization/index.html index ab38ff15..6f15ff0c 100644 --- a/api/graph/anonymization/index.html +++ b/api/graph/anonymization/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/api/graph/axis-bounds/index.html b/api/graph/axis-bounds/index.html index 45fa8c36..f66c0e70 100644 --- a/api/graph/axis-bounds/index.html +++ b/api/graph/axis-bounds/index.html @@ -6659,7 +6659,7 @@ - Counters + Counter @@ -6680,7 +6680,7 @@ - Distribution Summaries + Distribution Summary @@ -6701,7 +6701,7 @@ - Gauges + Gauge @@ -6722,7 +6722,7 @@ - Percentile Timers + Percentile Timer @@ -6743,7 +6743,7 @@ - Timers + Timer @@ -6877,7 +6877,7 @@ - Age Gauges + Age Gauge @@ -6898,7 +6898,7 @@ - Counters + Counter @@ -6919,7 +6919,7 @@ - Distribution Summaries + Distribution Summary @@ -6961,7 +6961,7 @@ - Max Gauges + Max Gauge @@ -6978,11 +6978,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6999,11 +6999,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7020,11 +7020,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7045,7 +7066,7 @@ - Timers + Timer diff --git a/api/graph/axis-scale/index.html b/api/graph/axis-scale/index.html index ec21b922..4dfc5d20 100644 --- a/api/graph/axis-scale/index.html +++ b/api/graph/axis-scale/index.html @@ -6641,7 +6641,7 @@ - Counters + Counter @@ -6662,7 +6662,7 @@ - Distribution Summaries + Distribution Summary @@ -6683,7 +6683,7 @@ - Gauges + Gauge @@ -6704,7 +6704,7 @@ - Percentile Timers + Percentile Timer @@ -6725,7 +6725,7 @@ - Timers + Timer @@ -6859,7 +6859,7 @@ - Age Gauges + Age Gauge @@ -6880,7 +6880,7 @@ - Counters + Counter @@ -6901,7 +6901,7 @@ - Distribution Summaries + Distribution Summary @@ -6943,7 +6943,7 @@ - Max Gauges + Max Gauge @@ -6960,11 +6960,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6981,11 +6981,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7002,11 +7002,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7027,7 +7048,7 @@ - Timers + Timer diff --git a/api/graph/basics/index.html b/api/graph/basics/index.html index 7f942f7d..518bcfaa 100644 --- a/api/graph/basics/index.html +++ b/api/graph/basics/index.html @@ -6674,7 +6674,7 @@ - Counters + Counter @@ -6695,7 +6695,7 @@ - Distribution Summaries + Distribution Summary @@ -6716,7 +6716,7 @@ - Gauges + Gauge @@ -6737,7 +6737,7 @@ - Percentile Timers + Percentile Timer @@ -6758,7 +6758,7 @@ - Timers + Timer @@ -6892,7 +6892,7 @@ - Age Gauges + Age Gauge @@ -6913,7 +6913,7 @@ - Counters + Counter @@ -6934,7 +6934,7 @@ - Distribution Summaries + Distribution Summary @@ -6976,7 +6976,7 @@ - Max Gauges + Max Gauge @@ -6993,11 +6993,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7014,11 +7014,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7035,11 +7035,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7060,7 +7081,7 @@ - Timers + Timer diff --git a/api/graph/color-palettes/index.html b/api/graph/color-palettes/index.html index 0976a1e1..898bc61b 100644 --- a/api/graph/color-palettes/index.html +++ b/api/graph/color-palettes/index.html @@ -6677,7 +6677,7 @@ - Counters + Counter @@ -6698,7 +6698,7 @@ - Distribution Summaries + Distribution Summary @@ -6719,7 +6719,7 @@ - Gauges + Gauge @@ -6740,7 +6740,7 @@ - Percentile Timers + Percentile Timer @@ -6761,7 +6761,7 @@ - Timers + Timer @@ -6895,7 +6895,7 @@ - Age Gauges + Age Gauge @@ -6916,7 +6916,7 @@ - Counters + Counter @@ -6937,7 +6937,7 @@ - Distribution Summaries + Distribution Summary @@ -6979,7 +6979,7 @@ - Max Gauges + Max Gauge @@ -6996,11 +6996,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7017,11 +7017,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7038,11 +7038,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7063,7 +7084,7 @@ - Timers + Timer diff --git a/api/graph/examples/index.html b/api/graph/examples/index.html index c8561fb5..a9900000 100644 --- a/api/graph/examples/index.html +++ b/api/graph/examples/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/api/graph/graph/index.html b/api/graph/graph/index.html index 23e71877..6df8cfb0 100644 --- a/api/graph/graph/index.html +++ b/api/graph/graph/index.html @@ -6701,7 +6701,7 @@ - Counters + Counter @@ -6722,7 +6722,7 @@ - Distribution Summaries + Distribution Summary @@ -6743,7 +6743,7 @@ - Gauges + Gauge @@ -6764,7 +6764,7 @@ - Percentile Timers + Percentile Timer @@ -6785,7 +6785,7 @@ - Timers + Timer @@ -6919,7 +6919,7 @@ - Age Gauges + Age Gauge @@ -6940,7 +6940,7 @@ - Counters + Counter @@ -6961,7 +6961,7 @@ - Distribution Summaries + Distribution Summary @@ -7003,7 +7003,7 @@ - Max Gauges + Max Gauge @@ -7020,11 +7020,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7041,11 +7041,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7062,11 +7062,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7087,7 +7108,7 @@ - Timers + Timer diff --git a/api/graph/heatmap/index.html b/api/graph/heatmap/index.html index 5f7eb072..b07b023d 100644 --- a/api/graph/heatmap/index.html +++ b/api/graph/heatmap/index.html @@ -6665,7 +6665,7 @@ - Counters + Counter @@ -6686,7 +6686,7 @@ - Distribution Summaries + Distribution Summary @@ -6707,7 +6707,7 @@ - Gauges + Gauge @@ -6728,7 +6728,7 @@ - Percentile Timers + Percentile Timer @@ -6749,7 +6749,7 @@ - Timers + Timer @@ -6883,7 +6883,7 @@ - Age Gauges + Age Gauge @@ -6904,7 +6904,7 @@ - Counters + Counter @@ -6925,7 +6925,7 @@ - Distribution Summaries + Distribution Summary @@ -6967,7 +6967,7 @@ - Max Gauges + Max Gauge @@ -6984,11 +6984,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7005,11 +7005,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7026,11 +7026,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7051,7 +7072,7 @@ - Timers + Timer diff --git a/api/graph/layout/index.html b/api/graph/layout/index.html index ee5534a2..802a5a6b 100644 --- a/api/graph/layout/index.html +++ b/api/graph/layout/index.html @@ -6674,7 +6674,7 @@ - Counters + Counter @@ -6695,7 +6695,7 @@ - Distribution Summaries + Distribution Summary @@ -6716,7 +6716,7 @@ - Gauges + Gauge @@ -6737,7 +6737,7 @@ - Percentile Timers + Percentile Timer @@ -6758,7 +6758,7 @@ - Timers + Timer @@ -6892,7 +6892,7 @@ - Age Gauges + Age Gauge @@ -6913,7 +6913,7 @@ - Counters + Counter @@ -6934,7 +6934,7 @@ - Distribution Summaries + Distribution Summary @@ -6976,7 +6976,7 @@ - Max Gauges + Max Gauge @@ -6993,11 +6993,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7014,11 +7014,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7035,11 +7035,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7060,7 +7081,7 @@ - Timers + Timer diff --git a/api/graph/legends/index.html b/api/graph/legends/index.html index 79f8b17f..f58deea7 100644 --- a/api/graph/legends/index.html +++ b/api/graph/legends/index.html @@ -6692,7 +6692,7 @@ - Counters + Counter @@ -6713,7 +6713,7 @@ - Distribution Summaries + Distribution Summary @@ -6734,7 +6734,7 @@ - Gauges + Gauge @@ -6755,7 +6755,7 @@ - Percentile Timers + Percentile Timer @@ -6776,7 +6776,7 @@ - Timers + Timer @@ -6910,7 +6910,7 @@ - Age Gauges + Age Gauge @@ -6931,7 +6931,7 @@ - Counters + Counter @@ -6952,7 +6952,7 @@ - Distribution Summaries + Distribution Summary @@ -6994,7 +6994,7 @@ - Max Gauges + Max Gauge @@ -7011,11 +7011,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7032,11 +7032,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7053,11 +7053,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7078,7 +7099,7 @@ - Timers + Timer diff --git a/api/graph/line-attributes/index.html b/api/graph/line-attributes/index.html index 3331956f..9a4eb53b 100644 --- a/api/graph/line-attributes/index.html +++ b/api/graph/line-attributes/index.html @@ -6623,7 +6623,7 @@ - Counters + Counter @@ -6644,7 +6644,7 @@ - Distribution Summaries + Distribution Summary @@ -6665,7 +6665,7 @@ - Gauges + Gauge @@ -6686,7 +6686,7 @@ - Percentile Timers + Percentile Timer @@ -6707,7 +6707,7 @@ - Timers + Timer @@ -6841,7 +6841,7 @@ - Age Gauges + Age Gauge @@ -6862,7 +6862,7 @@ - Counters + Counter @@ -6883,7 +6883,7 @@ - Distribution Summaries + Distribution Summary @@ -6925,7 +6925,7 @@ - Max Gauges + Max Gauge @@ -6942,11 +6942,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6963,11 +6963,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6984,11 +6984,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7009,7 +7030,7 @@ - Timers + Timer diff --git a/api/graph/line-styles/index.html b/api/graph/line-styles/index.html index 36c71d6b..a19ed5bc 100644 --- a/api/graph/line-styles/index.html +++ b/api/graph/line-styles/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/api/graph/multi-y/index.html b/api/graph/multi-y/index.html index ffa0141b..5446b941 100644 --- a/api/graph/multi-y/index.html +++ b/api/graph/multi-y/index.html @@ -6632,7 +6632,7 @@ - Counters + Counter @@ -6653,7 +6653,7 @@ - Distribution Summaries + Distribution Summary @@ -6674,7 +6674,7 @@ - Gauges + Gauge @@ -6695,7 +6695,7 @@ - Percentile Timers + Percentile Timer @@ -6716,7 +6716,7 @@ - Timers + Timer @@ -6850,7 +6850,7 @@ - Age Gauges + Age Gauge @@ -6871,7 +6871,7 @@ - Counters + Counter @@ -6892,7 +6892,7 @@ - Distribution Summaries + Distribution Summary @@ -6934,7 +6934,7 @@ - Max Gauges + Max Gauge @@ -6951,11 +6951,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6972,11 +6972,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6993,11 +6993,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7018,7 +7039,7 @@ - Timers + Timer diff --git a/api/graph/outputs/index.html b/api/graph/outputs/index.html index 42704d67..de4cd9fd 100644 --- a/api/graph/outputs/index.html +++ b/api/graph/outputs/index.html @@ -6650,7 +6650,7 @@ - Counters + Counter @@ -6671,7 +6671,7 @@ - Distribution Summaries + Distribution Summary @@ -6692,7 +6692,7 @@ - Gauges + Gauge @@ -6713,7 +6713,7 @@ - Percentile Timers + Percentile Timer @@ -6734,7 +6734,7 @@ - Timers + Timer @@ -6868,7 +6868,7 @@ - Age Gauges + Age Gauge @@ -6889,7 +6889,7 @@ - Counters + Counter @@ -6910,7 +6910,7 @@ - Distribution Summaries + Distribution Summary @@ -6952,7 +6952,7 @@ - Max Gauges + Max Gauge @@ -6969,11 +6969,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6990,11 +6990,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7011,11 +7011,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7036,7 +7057,7 @@ - Timers + Timer diff --git a/api/graph/tick/index.html b/api/graph/tick/index.html index 0cd43d3a..1199c9f3 100644 --- a/api/graph/tick/index.html +++ b/api/graph/tick/index.html @@ -6641,7 +6641,7 @@ - Counters + Counter @@ -6662,7 +6662,7 @@ - Distribution Summaries + Distribution Summary @@ -6683,7 +6683,7 @@ - Gauges + Gauge @@ -6704,7 +6704,7 @@ - Percentile Timers + Percentile Timer @@ -6725,7 +6725,7 @@ - Timers + Timer @@ -6859,7 +6859,7 @@ - Age Gauges + Age Gauge @@ -6880,7 +6880,7 @@ - Counters + Counter @@ -6901,7 +6901,7 @@ - Distribution Summaries + Distribution Summary @@ -6943,7 +6943,7 @@ - Max Gauges + Max Gauge @@ -6960,11 +6960,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6981,11 +6981,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7002,11 +7002,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7027,7 +7048,7 @@ - Timers + Timer diff --git a/api/graph/time-shift/index.html b/api/graph/time-shift/index.html index 4aa0efcd..ba96135a 100644 --- a/api/graph/time-shift/index.html +++ b/api/graph/time-shift/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/api/graph/time-zone/index.html b/api/graph/time-zone/index.html index 6c265f03..2c11850d 100644 --- a/api/graph/time-zone/index.html +++ b/api/graph/time-zone/index.html @@ -6623,7 +6623,7 @@ - Counters + Counter @@ -6644,7 +6644,7 @@ - Distribution Summaries + Distribution Summary @@ -6665,7 +6665,7 @@ - Gauges + Gauge @@ -6686,7 +6686,7 @@ - Percentile Timers + Percentile Timer @@ -6707,7 +6707,7 @@ - Timers + Timer @@ -6841,7 +6841,7 @@ - Age Gauges + Age Gauge @@ -6862,7 +6862,7 @@ - Counters + Counter @@ -6883,7 +6883,7 @@ - Distribution Summaries + Distribution Summary @@ -6925,7 +6925,7 @@ - Max Gauges + Max Gauge @@ -6942,11 +6942,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6963,11 +6963,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6984,11 +6984,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7009,7 +7030,7 @@ - Timers + Timer diff --git a/api/graph/vision/index.html b/api/graph/vision/index.html index 7d95d57a..89d0a0ed 100644 --- a/api/graph/vision/index.html +++ b/api/graph/vision/index.html @@ -6677,7 +6677,7 @@ - Counters + Counter @@ -6698,7 +6698,7 @@ - Distribution Summaries + Distribution Summary @@ -6719,7 +6719,7 @@ - Gauges + Gauge @@ -6740,7 +6740,7 @@ - Percentile Timers + Percentile Timer @@ -6761,7 +6761,7 @@ - Timers + Timer @@ -6895,7 +6895,7 @@ - Age Gauges + Age Gauge @@ -6916,7 +6916,7 @@ - Counters + Counter @@ -6937,7 +6937,7 @@ - Distribution Summaries + Distribution Summary @@ -6979,7 +6979,7 @@ - Max Gauges + Max Gauge @@ -6996,11 +6996,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7017,11 +7017,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7038,11 +7038,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7063,7 +7084,7 @@ - Timers + Timer diff --git a/api/tags/index.html b/api/tags/index.html index 1bb0761b..eae25398 100644 --- a/api/tags/index.html +++ b/api/tags/index.html @@ -6663,7 +6663,7 @@ - Counters + Counter @@ -6684,7 +6684,7 @@ - Distribution Summaries + Distribution Summary @@ -6705,7 +6705,7 @@ - Gauges + Gauge @@ -6726,7 +6726,7 @@ - Percentile Timers + Percentile Timer @@ -6747,7 +6747,7 @@ - Timers + Timer @@ -6881,7 +6881,7 @@ - Age Gauges + Age Gauge @@ -6902,7 +6902,7 @@ - Counters + Counter @@ -6923,7 +6923,7 @@ - Distribution Summaries + Distribution Summary @@ -6965,7 +6965,7 @@ - Max Gauges + Max Gauge @@ -6982,11 +6982,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7003,11 +7003,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7024,11 +7024,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7049,7 +7070,7 @@ - Timers + Timer diff --git a/api/time-parameters/index.html b/api/time-parameters/index.html index b24da79e..dd661a1c 100644 --- a/api/time-parameters/index.html +++ b/api/time-parameters/index.html @@ -6717,7 +6717,7 @@ - Counters + Counter @@ -6738,7 +6738,7 @@ - Distribution Summaries + Distribution Summary @@ -6759,7 +6759,7 @@ - Gauges + Gauge @@ -6780,7 +6780,7 @@ - Percentile Timers + Percentile Timer @@ -6801,7 +6801,7 @@ - Timers + Timer @@ -6935,7 +6935,7 @@ - Age Gauges + Age Gauge @@ -6956,7 +6956,7 @@ - Counters + Counter @@ -6977,7 +6977,7 @@ - Distribution Summaries + Distribution Summary @@ -7019,7 +7019,7 @@ - Max Gauges + Max Gauge @@ -7036,11 +7036,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7057,11 +7057,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7078,11 +7078,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7103,7 +7124,7 @@ - Timers + Timer diff --git a/asl/alerting-expressions/index.html b/asl/alerting-expressions/index.html index 559f7bd8..977bcbd2 100644 --- a/asl/alerting-expressions/index.html +++ b/asl/alerting-expressions/index.html @@ -6637,7 +6637,7 @@ - Counters + Counter @@ -6658,7 +6658,7 @@ - Distribution Summaries + Distribution Summary @@ -6679,7 +6679,7 @@ - Gauges + Gauge @@ -6700,7 +6700,7 @@ - Percentile Timers + Percentile Timer @@ -6721,7 +6721,7 @@ - Timers + Timer @@ -6855,7 +6855,7 @@ - Age Gauges + Age Gauge @@ -6876,7 +6876,7 @@ - Counters + Counter @@ -6897,7 +6897,7 @@ - Distribution Summaries + Distribution Summary @@ -6939,7 +6939,7 @@ - Max Gauges + Max Gauge @@ -6956,11 +6956,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6977,11 +6977,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6998,11 +6998,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7023,7 +7044,7 @@ - Timers + Timer diff --git a/asl/alerting-philosophy/index.html b/asl/alerting-philosophy/index.html index e316b79d..21f6a8f4 100644 --- a/asl/alerting-philosophy/index.html +++ b/asl/alerting-philosophy/index.html @@ -6709,7 +6709,7 @@ - Counters + Counter @@ -6730,7 +6730,7 @@ - Distribution Summaries + Distribution Summary @@ -6751,7 +6751,7 @@ - Gauges + Gauge @@ -6772,7 +6772,7 @@ - Percentile Timers + Percentile Timer @@ -6793,7 +6793,7 @@ - Timers + Timer @@ -6927,7 +6927,7 @@ - Age Gauges + Age Gauge @@ -6948,7 +6948,7 @@ - Counters + Counter @@ -6969,7 +6969,7 @@ - Distribution Summaries + Distribution Summary @@ -7011,7 +7011,7 @@ - Max Gauges + Max Gauge @@ -7028,11 +7028,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7049,11 +7049,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7070,11 +7070,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7095,7 +7116,7 @@ - Timers + Timer diff --git a/asl/des/index.html b/asl/des/index.html index 3c49c41e..9edda9ae 100644 --- a/asl/des/index.html +++ b/asl/des/index.html @@ -6661,7 +6661,7 @@ - Counters + Counter @@ -6682,7 +6682,7 @@ - Distribution Summaries + Distribution Summary @@ -6703,7 +6703,7 @@ - Gauges + Gauge @@ -6724,7 +6724,7 @@ - Percentile Timers + Percentile Timer @@ -6745,7 +6745,7 @@ - Timers + Timer @@ -6879,7 +6879,7 @@ - Age Gauges + Age Gauge @@ -6900,7 +6900,7 @@ - Counters + Counter @@ -6921,7 +6921,7 @@ - Distribution Summaries + Distribution Summary @@ -6963,7 +6963,7 @@ - Max Gauges + Max Gauge @@ -6980,11 +6980,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7001,11 +7001,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7022,11 +7022,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7047,7 +7068,7 @@ - Timers + Timer diff --git a/asl/index.html b/asl/index.html index 1c3a82d7..dd2c015d 100644 --- a/asl/index.html +++ b/asl/index.html @@ -6540,7 +6540,7 @@ - Counters + Counter @@ -6561,7 +6561,7 @@ - Distribution Summaries + Distribution Summary @@ -6582,7 +6582,7 @@ - Gauges + Gauge @@ -6603,7 +6603,7 @@ - Percentile Timers + Percentile Timer @@ -6624,7 +6624,7 @@ - Timers + Timer @@ -6758,7 +6758,7 @@ - Age Gauges + Age Gauge @@ -6779,7 +6779,7 @@ - Counters + Counter @@ -6800,7 +6800,7 @@ - Distribution Summaries + Distribution Summary @@ -6842,7 +6842,7 @@ - Max Gauges + Max Gauge @@ -6859,11 +6859,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6880,11 +6880,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6901,11 +6901,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6926,7 +6947,7 @@ - Timers + Timer diff --git a/asl/ref/-rot/index.html b/asl/ref/-rot/index.html index 4506491d..5fc28520 100644 --- a/asl/ref/-rot/index.html +++ b/asl/ref/-rot/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/2over/index.html b/asl/ref/2over/index.html index cdfd75a6..15841294 100644 --- a/asl/ref/2over/index.html +++ b/asl/ref/2over/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/abs/index.html b/asl/ref/abs/index.html index a77344ba..6ed38ab9 100644 --- a/asl/ref/abs/index.html +++ b/asl/ref/abs/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/add/index.html b/asl/ref/add/index.html index 363e5b5a..24591a67 100644 --- a/asl/ref/add/index.html +++ b/asl/ref/add/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/all/index.html b/asl/ref/all/index.html index 0d9d71b1..9fff5371 100644 --- a/asl/ref/all/index.html +++ b/asl/ref/all/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/alpha/index.html b/asl/ref/alpha/index.html index cdbaef71..c4259c3a 100644 --- a/asl/ref/alpha/index.html +++ b/asl/ref/alpha/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/and/index.html b/asl/ref/and/index.html index 58b4011e..6b36beb3 100644 --- a/asl/ref/and/index.html +++ b/asl/ref/and/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/area/index.html b/asl/ref/area/index.html index 5078b76e..1ea66c83 100644 --- a/asl/ref/area/index.html +++ b/asl/ref/area/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/as/index.html b/asl/ref/as/index.html index e6375d13..0ca1592b 100644 --- a/asl/ref/as/index.html +++ b/asl/ref/as/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/avg/index.html b/asl/ref/avg/index.html index 2e6e9d1d..db85943f 100644 --- a/asl/ref/avg/index.html +++ b/asl/ref/avg/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/axis/index.html b/asl/ref/axis/index.html index d16f2a04..0321ac2a 100644 --- a/asl/ref/axis/index.html +++ b/asl/ref/axis/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/bottomk-others-avg/index.html b/asl/ref/bottomk-others-avg/index.html index df6f08c3..d260c9ed 100644 --- a/asl/ref/bottomk-others-avg/index.html +++ b/asl/ref/bottomk-others-avg/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/bottomk-others-max/index.html b/asl/ref/bottomk-others-max/index.html index d9bd497c..b153d9f1 100644 --- a/asl/ref/bottomk-others-max/index.html +++ b/asl/ref/bottomk-others-max/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/bottomk-others-min/index.html b/asl/ref/bottomk-others-min/index.html index 885fcd28..0eaffcc2 100644 --- a/asl/ref/bottomk-others-min/index.html +++ b/asl/ref/bottomk-others-min/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/bottomk-others-sum/index.html b/asl/ref/bottomk-others-sum/index.html index db798e92..6d8d9207 100644 --- a/asl/ref/bottomk-others-sum/index.html +++ b/asl/ref/bottomk-others-sum/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/bottomk/index.html b/asl/ref/bottomk/index.html index 74a7aded..c90aa964 100644 --- a/asl/ref/bottomk/index.html +++ b/asl/ref/bottomk/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/by/index.html b/asl/ref/by/index.html index 7cf7b64f..d1dccb6d 100644 --- a/asl/ref/by/index.html +++ b/asl/ref/by/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/call/index.html b/asl/ref/call/index.html index b1e7d82d..ea59bb25 100644 --- a/asl/ref/call/index.html +++ b/asl/ref/call/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/cf-avg/index.html b/asl/ref/cf-avg/index.html index 3844a35f..4db2cac8 100644 --- a/asl/ref/cf-avg/index.html +++ b/asl/ref/cf-avg/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/cf-max/index.html b/asl/ref/cf-max/index.html index c1d97843..19e6d300 100644 --- a/asl/ref/cf-max/index.html +++ b/asl/ref/cf-max/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/cf-min/index.html b/asl/ref/cf-min/index.html index 3cdb3f52..6515ac59 100644 --- a/asl/ref/cf-min/index.html +++ b/asl/ref/cf-min/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/cf-sum/index.html b/asl/ref/cf-sum/index.html index 01f60e10..d6c0d964 100644 --- a/asl/ref/cf-sum/index.html +++ b/asl/ref/cf-sum/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/cg/index.html b/asl/ref/cg/index.html index 4c78adc9..844c7921 100644 --- a/asl/ref/cg/index.html +++ b/asl/ref/cg/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/clamp-max/index.html b/asl/ref/clamp-max/index.html index 7a10ac0d..9ee3d096 100644 --- a/asl/ref/clamp-max/index.html +++ b/asl/ref/clamp-max/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/clamp-min/index.html b/asl/ref/clamp-min/index.html index 73be467c..84663335 100644 --- a/asl/ref/clamp-min/index.html +++ b/asl/ref/clamp-min/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/clear/index.html b/asl/ref/clear/index.html index c3b6e287..af0b8a0d 100644 --- a/asl/ref/clear/index.html +++ b/asl/ref/clear/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/color/index.html b/asl/ref/color/index.html index 7062b067..c5fe44bf 100644 --- a/asl/ref/color/index.html +++ b/asl/ref/color/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/const/index.html b/asl/ref/const/index.html index a535c0cf..7fd5c97c 100644 --- a/asl/ref/const/index.html +++ b/asl/ref/const/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/contains/index.html b/asl/ref/contains/index.html index ca062c3e..e4608556 100644 --- a/asl/ref/contains/index.html +++ b/asl/ref/contains/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/count/index.html b/asl/ref/count/index.html index 678f593b..534d4953 100644 --- a/asl/ref/count/index.html +++ b/asl/ref/count/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/cq/index.html b/asl/ref/cq/index.html index c2cbd2e9..13c56a04 100644 --- a/asl/ref/cq/index.html +++ b/asl/ref/cq/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/decode/index.html b/asl/ref/decode/index.html index 5033b94c..2cc582e3 100644 --- a/asl/ref/decode/index.html +++ b/asl/ref/decode/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/delay/index.html b/asl/ref/delay/index.html index 89558507..8808384b 100644 --- a/asl/ref/delay/index.html +++ b/asl/ref/delay/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/depth/index.html b/asl/ref/depth/index.html index f64c0a71..1f90fac0 100644 --- a/asl/ref/depth/index.html +++ b/asl/ref/depth/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/derivative/index.html b/asl/ref/derivative/index.html index 45bfaef1..c30fd351 100644 --- a/asl/ref/derivative/index.html +++ b/asl/ref/derivative/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/des-epic-signal/index.html b/asl/ref/des-epic-signal/index.html index aa713da6..074766f0 100644 --- a/asl/ref/des-epic-signal/index.html +++ b/asl/ref/des-epic-signal/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/des-epic-viz/index.html b/asl/ref/des-epic-viz/index.html index 3757010a..a6f15eac 100644 --- a/asl/ref/des-epic-viz/index.html +++ b/asl/ref/des-epic-viz/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/des-fast/index.html b/asl/ref/des-fast/index.html index 1df354dc..79272e5a 100644 --- a/asl/ref/des-fast/index.html +++ b/asl/ref/des-fast/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/des-simple/index.html b/asl/ref/des-simple/index.html index d6ceb743..4ca1640c 100644 --- a/asl/ref/des-simple/index.html +++ b/asl/ref/des-simple/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/des-slow/index.html b/asl/ref/des-slow/index.html index da2c94fc..bdbc201d 100644 --- a/asl/ref/des-slow/index.html +++ b/asl/ref/des-slow/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/des-slower/index.html b/asl/ref/des-slower/index.html index b20b2e49..c7d99dd1 100644 --- a/asl/ref/des-slower/index.html +++ b/asl/ref/des-slower/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/des/index.html b/asl/ref/des/index.html index 2cb0654c..509ce2cd 100644 --- a/asl/ref/des/index.html +++ b/asl/ref/des/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/dist-avg/index.html b/asl/ref/dist-avg/index.html index ca3d68ed..ee03d960 100644 --- a/asl/ref/dist-avg/index.html +++ b/asl/ref/dist-avg/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/dist-max/index.html b/asl/ref/dist-max/index.html index 16ba72ae..f227e9f4 100644 --- a/asl/ref/dist-max/index.html +++ b/asl/ref/dist-max/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/dist-stddev/index.html b/asl/ref/dist-stddev/index.html index f628d510..0e2bba33 100644 --- a/asl/ref/dist-stddev/index.html +++ b/asl/ref/dist-stddev/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/div/index.html b/asl/ref/div/index.html index 34b557ed..87521f60 100644 --- a/asl/ref/div/index.html +++ b/asl/ref/div/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/drop/index.html b/asl/ref/drop/index.html index 2fad907e..859bd219 100644 --- a/asl/ref/drop/index.html +++ b/asl/ref/drop/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/dup/index.html b/asl/ref/dup/index.html index 24f6813e..46d7b9c6 100644 --- a/asl/ref/dup/index.html +++ b/asl/ref/dup/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/each/index.html b/asl/ref/each/index.html index b7c51554..cd9e3f90 100644 --- a/asl/ref/each/index.html +++ b/asl/ref/each/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/ends/index.html b/asl/ref/ends/index.html index ace686ce..b1257129 100644 --- a/asl/ref/ends/index.html +++ b/asl/ref/ends/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/eq/index.html b/asl/ref/eq/index.html index 908930dc..3b8d09c9 100644 --- a/asl/ref/eq/index.html +++ b/asl/ref/eq/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/eureka-avg/index.html b/asl/ref/eureka-avg/index.html index c6811369..7c4d4786 100644 --- a/asl/ref/eureka-avg/index.html +++ b/asl/ref/eureka-avg/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/fadd/index.html b/asl/ref/fadd/index.html index a0cda6f1..781ab4d1 100644 --- a/asl/ref/fadd/index.html +++ b/asl/ref/fadd/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/false/index.html b/asl/ref/false/index.html index 35bcdd20..d16d7c09 100644 --- a/asl/ref/false/index.html +++ b/asl/ref/false/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/fcall/index.html b/asl/ref/fcall/index.html index 7638fe63..095068ff 100644 --- a/asl/ref/fcall/index.html +++ b/asl/ref/fcall/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/fdiv/index.html b/asl/ref/fdiv/index.html index 86787221..d0a62a9b 100644 --- a/asl/ref/fdiv/index.html +++ b/asl/ref/fdiv/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/filter/index.html b/asl/ref/filter/index.html index 97d76912..0fd20fd4 100644 --- a/asl/ref/filter/index.html +++ b/asl/ref/filter/index.html @@ -6571,7 +6571,7 @@ - Counters + Counter @@ -6592,7 +6592,7 @@ - Distribution Summaries + Distribution Summary @@ -6613,7 +6613,7 @@ - Gauges + Gauge @@ -6634,7 +6634,7 @@ - Percentile Timers + Percentile Timer @@ -6655,7 +6655,7 @@ - Timers + Timer @@ -6789,7 +6789,7 @@ - Age Gauges + Age Gauge @@ -6810,7 +6810,7 @@ - Counters + Counter @@ -6831,7 +6831,7 @@ - Distribution Summaries + Distribution Summary @@ -6873,7 +6873,7 @@ - Max Gauges + Max Gauge @@ -6890,11 +6890,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6911,11 +6911,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6932,11 +6932,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6957,7 +6978,7 @@ - Timers + Timer diff --git a/asl/ref/fmul/index.html b/asl/ref/fmul/index.html index a5ad4d6b..28c83996 100644 --- a/asl/ref/fmul/index.html +++ b/asl/ref/fmul/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/format/index.html b/asl/ref/format/index.html index 819a01d0..425669ae 100644 --- a/asl/ref/format/index.html +++ b/asl/ref/format/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/freeze/index.html b/asl/ref/freeze/index.html index 0265af21..d6a20f2f 100644 --- a/asl/ref/freeze/index.html +++ b/asl/ref/freeze/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/fsub/index.html b/asl/ref/fsub/index.html index 6500e86b..edbe7a02 100644 --- a/asl/ref/fsub/index.html +++ b/asl/ref/fsub/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/ge/index.html b/asl/ref/ge/index.html index 1d4b915b..55ad6faf 100644 --- a/asl/ref/ge/index.html +++ b/asl/ref/ge/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/get/index.html b/asl/ref/get/index.html index fa8dc897..7f7ad50e 100644 --- a/asl/ref/get/index.html +++ b/asl/ref/get/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/gt/index.html b/asl/ref/gt/index.html index aa328b57..e0a09675 100644 --- a/asl/ref/gt/index.html +++ b/asl/ref/gt/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/has/index.html b/asl/ref/has/index.html index f66245b6..5e123b23 100644 --- a/asl/ref/has/index.html +++ b/asl/ref/has/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/head/index.html b/asl/ref/head/index.html index e4a8fb60..2f76c916 100644 --- a/asl/ref/head/index.html +++ b/asl/ref/head/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/heatmap/index.html b/asl/ref/heatmap/index.html index 8fa61445..076bada4 100644 --- a/asl/ref/heatmap/index.html +++ b/asl/ref/heatmap/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/in/index.html b/asl/ref/in/index.html index a70bdb9a..98c41b29 100644 --- a/asl/ref/in/index.html +++ b/asl/ref/in/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/integral/index.html b/asl/ref/integral/index.html index 1f74410b..ba418935 100644 --- a/asl/ref/integral/index.html +++ b/asl/ref/integral/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/le/index.html b/asl/ref/le/index.html index ae27c970..8a785e80 100644 --- a/asl/ref/le/index.html +++ b/asl/ref/le/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/legend/index.html b/asl/ref/legend/index.html index 7204919a..a407876d 100644 --- a/asl/ref/legend/index.html +++ b/asl/ref/legend/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/limit/index.html b/asl/ref/limit/index.html index ca36386b..07fef804 100644 --- a/asl/ref/limit/index.html +++ b/asl/ref/limit/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/line/index.html b/asl/ref/line/index.html index 5b3c25f2..9351fb5d 100644 --- a/asl/ref/line/index.html +++ b/asl/ref/line/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/list/index.html b/asl/ref/list/index.html index 88bd7b12..0cb8953e 100644 --- a/asl/ref/list/index.html +++ b/asl/ref/list/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/ls/index.html b/asl/ref/ls/index.html index 6a9f5f43..14f6151e 100644 --- a/asl/ref/ls/index.html +++ b/asl/ref/ls/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/lt/index.html b/asl/ref/lt/index.html index eef00313..0f82fb58 100644 --- a/asl/ref/lt/index.html +++ b/asl/ref/lt/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/lw/index.html b/asl/ref/lw/index.html index 3ced4091..100bff24 100644 --- a/asl/ref/lw/index.html +++ b/asl/ref/lw/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/map/index.html b/asl/ref/map/index.html index fb80dff1..ef95db9f 100644 --- a/asl/ref/map/index.html +++ b/asl/ref/map/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/max/index.html b/asl/ref/max/index.html index f2132743..7ec97f4a 100644 --- a/asl/ref/max/index.html +++ b/asl/ref/max/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/median/index.html b/asl/ref/median/index.html index 0bb746a6..241b87e7 100644 --- a/asl/ref/median/index.html +++ b/asl/ref/median/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/min/index.html b/asl/ref/min/index.html index 8b87a7f0..1d6c6a4c 100644 --- a/asl/ref/min/index.html +++ b/asl/ref/min/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/mul/index.html b/asl/ref/mul/index.html index fbf033ea..13017226 100644 --- a/asl/ref/mul/index.html +++ b/asl/ref/mul/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/named-rewrite/index.html b/asl/ref/named-rewrite/index.html index 5e50ea96..766f444a 100644 --- a/asl/ref/named-rewrite/index.html +++ b/asl/ref/named-rewrite/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/ndrop/index.html b/asl/ref/ndrop/index.html index 68cdf5fb..5dbbee83 100644 --- a/asl/ref/ndrop/index.html +++ b/asl/ref/ndrop/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/neg/index.html b/asl/ref/neg/index.html index 51814266..850eea84 100644 --- a/asl/ref/neg/index.html +++ b/asl/ref/neg/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/nip/index.html b/asl/ref/nip/index.html index 7816d69c..6ec54f1f 100644 --- a/asl/ref/nip/index.html +++ b/asl/ref/nip/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/nlist/index.html b/asl/ref/nlist/index.html index af97bd0f..d3acc2d2 100644 --- a/asl/ref/nlist/index.html +++ b/asl/ref/nlist/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/node-avg/index.html b/asl/ref/node-avg/index.html index 83137f41..df9f2b88 100644 --- a/asl/ref/node-avg/index.html +++ b/asl/ref/node-avg/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/not/index.html b/asl/ref/not/index.html index fea30dab..9aaca210 100644 --- a/asl/ref/not/index.html +++ b/asl/ref/not/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/offset/index.html b/asl/ref/offset/index.html index 430077f5..079d20f9 100644 --- a/asl/ref/offset/index.html +++ b/asl/ref/offset/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/or/index.html b/asl/ref/or/index.html index be6dd93d..f5169244 100644 --- a/asl/ref/or/index.html +++ b/asl/ref/or/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/order/index.html b/asl/ref/order/index.html index cb67760d..8e6db219 100644 --- a/asl/ref/order/index.html +++ b/asl/ref/order/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/over/index.html b/asl/ref/over/index.html index 7391bb97..594827fb 100644 --- a/asl/ref/over/index.html +++ b/asl/ref/over/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/palette/index.html b/asl/ref/palette/index.html index 56506d13..1a04f184 100644 --- a/asl/ref/palette/index.html +++ b/asl/ref/palette/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/pct/index.html b/asl/ref/pct/index.html index baf6bc97..05cf43fd 100644 --- a/asl/ref/pct/index.html +++ b/asl/ref/pct/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/per-step/index.html b/asl/ref/per-step/index.html index c27fb1b1..0575c031 100644 --- a/asl/ref/per-step/index.html +++ b/asl/ref/per-step/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/percentiles-heatmap/index.html b/asl/ref/percentiles-heatmap/index.html index 55de31e3..82240316 100644 --- a/asl/ref/percentiles-heatmap/index.html +++ b/asl/ref/percentiles-heatmap/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/percentiles/index.html b/asl/ref/percentiles/index.html index dac2a13f..66367ffb 100644 --- a/asl/ref/percentiles/index.html +++ b/asl/ref/percentiles/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/pick/index.html b/asl/ref/pick/index.html index e2491938..78f3d891 100644 --- a/asl/ref/pick/index.html +++ b/asl/ref/pick/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/pow/index.html b/asl/ref/pow/index.html index 3620b7a5..8d7de324 100644 --- a/asl/ref/pow/index.html +++ b/asl/ref/pow/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/random/index.html b/asl/ref/random/index.html index 493627ab..f0fc31fa 100644 --- a/asl/ref/random/index.html +++ b/asl/ref/random/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/re/index.html b/asl/ref/re/index.html index d1490a88..6bf496c9 100644 --- a/asl/ref/re/index.html +++ b/asl/ref/re/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/reic/index.html b/asl/ref/reic/index.html index 8d131efb..84be9013 100644 --- a/asl/ref/reic/index.html +++ b/asl/ref/reic/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/roll/index.html b/asl/ref/roll/index.html index 0bda5a74..8a0e692b 100644 --- a/asl/ref/roll/index.html +++ b/asl/ref/roll/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/rolling-count/index.html b/asl/ref/rolling-count/index.html index a3b1bdf6..36065595 100644 --- a/asl/ref/rolling-count/index.html +++ b/asl/ref/rolling-count/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/rolling-max/index.html b/asl/ref/rolling-max/index.html index d9eb5f0f..50c5dfd3 100644 --- a/asl/ref/rolling-max/index.html +++ b/asl/ref/rolling-max/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/rolling-mean/index.html b/asl/ref/rolling-mean/index.html index 5fa3cd0a..37cb02f4 100644 --- a/asl/ref/rolling-mean/index.html +++ b/asl/ref/rolling-mean/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/rolling-min/index.html b/asl/ref/rolling-min/index.html index 3009f28f..4094c577 100644 --- a/asl/ref/rolling-min/index.html +++ b/asl/ref/rolling-min/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/rolling-sum/index.html b/asl/ref/rolling-sum/index.html index 41a0a498..e17c737b 100644 --- a/asl/ref/rolling-sum/index.html +++ b/asl/ref/rolling-sum/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/rot/index.html b/asl/ref/rot/index.html index cfac0648..b64195c3 100644 --- a/asl/ref/rot/index.html +++ b/asl/ref/rot/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/s/index.html b/asl/ref/s/index.html index a6ecdb7b..918cfd94 100644 --- a/asl/ref/s/index.html +++ b/asl/ref/s/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/sdes-fast/index.html b/asl/ref/sdes-fast/index.html index 66d23a55..21e27500 100644 --- a/asl/ref/sdes-fast/index.html +++ b/asl/ref/sdes-fast/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/sdes-simple/index.html b/asl/ref/sdes-simple/index.html index 64a52b25..7eadb367 100644 --- a/asl/ref/sdes-simple/index.html +++ b/asl/ref/sdes-simple/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/sdes-slow/index.html b/asl/ref/sdes-slow/index.html index 885705c8..00e3ae3d 100644 --- a/asl/ref/sdes-slow/index.html +++ b/asl/ref/sdes-slow/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/sdes-slower/index.html b/asl/ref/sdes-slower/index.html index b99532c6..4aa0a0c3 100644 --- a/asl/ref/sdes-slower/index.html +++ b/asl/ref/sdes-slower/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/sdes/index.html b/asl/ref/sdes/index.html index 692d9bd0..a55b7d54 100644 --- a/asl/ref/sdes/index.html +++ b/asl/ref/sdes/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/set/index.html b/asl/ref/set/index.html index 45bb7167..c33c99ea 100644 --- a/asl/ref/set/index.html +++ b/asl/ref/set/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/sort/index.html b/asl/ref/sort/index.html index 3d37a80d..a45804c2 100644 --- a/asl/ref/sort/index.html +++ b/asl/ref/sort/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/sqrt/index.html b/asl/ref/sqrt/index.html index 74608eb2..67806ac6 100644 --- a/asl/ref/sqrt/index.html +++ b/asl/ref/sqrt/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/srandom/index.html b/asl/ref/srandom/index.html index de01f55b..2237a1b8 100644 --- a/asl/ref/srandom/index.html +++ b/asl/ref/srandom/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/sset/index.html b/asl/ref/sset/index.html index 52d5f967..0eb05f37 100644 --- a/asl/ref/sset/index.html +++ b/asl/ref/sset/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stack/index.html b/asl/ref/stack/index.html index bfc1cabc..fc3d3e83 100644 --- a/asl/ref/stack/index.html +++ b/asl/ref/stack/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/starts/index.html b/asl/ref/starts/index.html index 38029c9b..3d5c0dc5 100644 --- a/asl/ref/starts/index.html +++ b/asl/ref/starts/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stat-avg-mf/index.html b/asl/ref/stat-avg-mf/index.html index cd51e638..2b9658e6 100644 --- a/asl/ref/stat-avg-mf/index.html +++ b/asl/ref/stat-avg-mf/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stat-avg/index.html b/asl/ref/stat-avg/index.html index 9ee5f425..ea3e9012 100644 --- a/asl/ref/stat-avg/index.html +++ b/asl/ref/stat-avg/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stat-count/index.html b/asl/ref/stat-count/index.html index 485e8b23..be3b20b2 100644 --- a/asl/ref/stat-count/index.html +++ b/asl/ref/stat-count/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stat-last/index.html b/asl/ref/stat-last/index.html index 4e307503..cf3c9289 100644 --- a/asl/ref/stat-last/index.html +++ b/asl/ref/stat-last/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stat-max-mf/index.html b/asl/ref/stat-max-mf/index.html index a922e221..743b2b52 100644 --- a/asl/ref/stat-max-mf/index.html +++ b/asl/ref/stat-max-mf/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stat-max/index.html b/asl/ref/stat-max/index.html index 084eaa77..7f85f693 100644 --- a/asl/ref/stat-max/index.html +++ b/asl/ref/stat-max/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stat-min-mf/index.html b/asl/ref/stat-min-mf/index.html index 9e790bc6..247e7fd8 100644 --- a/asl/ref/stat-min-mf/index.html +++ b/asl/ref/stat-min-mf/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stat-min/index.html b/asl/ref/stat-min/index.html index 5f1dd36e..e2be9e60 100644 --- a/asl/ref/stat-min/index.html +++ b/asl/ref/stat-min/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stat-total/index.html b/asl/ref/stat-total/index.html index 08fe2896..3ebe2a37 100644 --- a/asl/ref/stat-total/index.html +++ b/asl/ref/stat-total/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stat/index.html b/asl/ref/stat/index.html index 8e5a4751..9168ca47 100644 --- a/asl/ref/stat/index.html +++ b/asl/ref/stat/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/stddev/index.html b/asl/ref/stddev/index.html index 1d58f808..eadaede2 100644 --- a/asl/ref/stddev/index.html +++ b/asl/ref/stddev/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/sub/index.html b/asl/ref/sub/index.html index 30ef8e71..238dba4b 100644 --- a/asl/ref/sub/index.html +++ b/asl/ref/sub/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/sum/index.html b/asl/ref/sum/index.html index 1ab3e274..418a8efe 100644 --- a/asl/ref/sum/index.html +++ b/asl/ref/sum/index.html @@ -6668,7 +6668,7 @@ - Counters + Counter @@ -6689,7 +6689,7 @@ - Distribution Summaries + Distribution Summary @@ -6710,7 +6710,7 @@ - Gauges + Gauge @@ -6731,7 +6731,7 @@ - Percentile Timers + Percentile Timer @@ -6752,7 +6752,7 @@ - Timers + Timer @@ -6886,7 +6886,7 @@ - Age Gauges + Age Gauge @@ -6907,7 +6907,7 @@ - Counters + Counter @@ -6928,7 +6928,7 @@ - Distribution Summaries + Distribution Summary @@ -6970,7 +6970,7 @@ - Max Gauges + Max Gauge @@ -6987,11 +6987,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7008,11 +7008,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7029,11 +7029,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7054,7 +7075,7 @@ - Timers + Timer diff --git a/asl/ref/swap/index.html b/asl/ref/swap/index.html index db380762..d95fd507 100644 --- a/asl/ref/swap/index.html +++ b/asl/ref/swap/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/time-span/index.html b/asl/ref/time-span/index.html index 52f7fc1c..bc1eeb04 100644 --- a/asl/ref/time-span/index.html +++ b/asl/ref/time-span/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/time/index.html b/asl/ref/time/index.html index 59842549..49b09f6c 100644 --- a/asl/ref/time/index.html +++ b/asl/ref/time/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/topk-others-avg/index.html b/asl/ref/topk-others-avg/index.html index e46fd0ee..21c55376 100644 --- a/asl/ref/topk-others-avg/index.html +++ b/asl/ref/topk-others-avg/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/topk-others-max/index.html b/asl/ref/topk-others-max/index.html index de10adef..ed672e09 100644 --- a/asl/ref/topk-others-max/index.html +++ b/asl/ref/topk-others-max/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/topk-others-min/index.html b/asl/ref/topk-others-min/index.html index 13c572bb..dc69eb2b 100644 --- a/asl/ref/topk-others-min/index.html +++ b/asl/ref/topk-others-min/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/topk-others-sum/index.html b/asl/ref/topk-others-sum/index.html index 0e85f0b7..2ed9cce0 100644 --- a/asl/ref/topk-others-sum/index.html +++ b/asl/ref/topk-others-sum/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/topk/index.html b/asl/ref/topk/index.html index a2451c39..22542b2e 100644 --- a/asl/ref/topk/index.html +++ b/asl/ref/topk/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/trend/index.html b/asl/ref/trend/index.html index a7b936f5..af1096bc 100644 --- a/asl/ref/trend/index.html +++ b/asl/ref/trend/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/true/index.html b/asl/ref/true/index.html index 38612e73..095d98fa 100644 --- a/asl/ref/true/index.html +++ b/asl/ref/true/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/tuck/index.html b/asl/ref/tuck/index.html index 4ba26a58..d140a25d 100644 --- a/asl/ref/tuck/index.html +++ b/asl/ref/tuck/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/ref/vspan/index.html b/asl/ref/vspan/index.html index 9c5c61c7..ec14803a 100644 --- a/asl/ref/vspan/index.html +++ b/asl/ref/vspan/index.html @@ -6563,7 +6563,7 @@ - Counters + Counter @@ -6584,7 +6584,7 @@ - Distribution Summaries + Distribution Summary @@ -6605,7 +6605,7 @@ - Gauges + Gauge @@ -6626,7 +6626,7 @@ - Percentile Timers + Percentile Timer @@ -6647,7 +6647,7 @@ - Timers + Timer @@ -6781,7 +6781,7 @@ - Age Gauges + Age Gauge @@ -6802,7 +6802,7 @@ - Counters + Counter @@ -6823,7 +6823,7 @@ - Distribution Summaries + Distribution Summary @@ -6865,7 +6865,7 @@ - Max Gauges + Max Gauge @@ -6882,11 +6882,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6903,11 +6903,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6924,11 +6924,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6949,7 +6970,7 @@ - Timers + Timer diff --git a/asl/tutorial/index.html b/asl/tutorial/index.html index c4c71219..d2617595 100644 --- a/asl/tutorial/index.html +++ b/asl/tutorial/index.html @@ -6670,7 +6670,7 @@ - Counters + Counter @@ -6691,7 +6691,7 @@ - Distribution Summaries + Distribution Summary @@ -6712,7 +6712,7 @@ - Gauges + Gauge @@ -6733,7 +6733,7 @@ - Percentile Timers + Percentile Timer @@ -6754,7 +6754,7 @@ - Timers + Timer @@ -6888,7 +6888,7 @@ - Age Gauges + Age Gauge @@ -6909,7 +6909,7 @@ - Counters + Counter @@ -6930,7 +6930,7 @@ - Distribution Summaries + Distribution Summary @@ -6972,7 +6972,7 @@ - Max Gauges + Max Gauge @@ -6989,11 +6989,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7010,11 +7010,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7031,11 +7031,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7056,7 +7077,7 @@ - Timers + Timer diff --git a/concepts/consolidation/index.html b/concepts/consolidation/index.html index 3f016f7a..c2e40e2e 100644 --- a/concepts/consolidation/index.html +++ b/concepts/consolidation/index.html @@ -6559,7 +6559,7 @@ - Counters + Counter @@ -6580,7 +6580,7 @@ - Distribution Summaries + Distribution Summary @@ -6601,7 +6601,7 @@ - Gauges + Gauge @@ -6622,7 +6622,7 @@ - Percentile Timers + Percentile Timer @@ -6643,7 +6643,7 @@ - Timers + Timer @@ -6777,7 +6777,7 @@ - Age Gauges + Age Gauge @@ -6798,7 +6798,7 @@ - Counters + Counter @@ -6819,7 +6819,7 @@ - Distribution Summaries + Distribution Summary @@ -6861,7 +6861,7 @@ - Max Gauges + Max Gauge @@ -6878,11 +6878,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6899,11 +6899,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6920,11 +6920,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6945,7 +6966,7 @@ - Timers + Timer diff --git a/concepts/naming/index.html b/concepts/naming/index.html index 038d8c9f..d8f34f65 100644 --- a/concepts/naming/index.html +++ b/concepts/naming/index.html @@ -6694,7 +6694,7 @@ - Counters + Counter @@ -6715,7 +6715,7 @@ - Distribution Summaries + Distribution Summary @@ -6736,7 +6736,7 @@ - Gauges + Gauge @@ -6757,7 +6757,7 @@ - Percentile Timers + Percentile Timer @@ -6778,7 +6778,7 @@ - Timers + Timer @@ -6912,7 +6912,7 @@ - Age Gauges + Age Gauge @@ -6933,7 +6933,7 @@ - Counters + Counter @@ -6954,7 +6954,7 @@ - Distribution Summaries + Distribution Summary @@ -6996,7 +6996,7 @@ - Max Gauges + Max Gauge @@ -7013,11 +7013,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7034,11 +7034,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7055,11 +7055,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7080,7 +7101,7 @@ - Timers + Timer diff --git a/concepts/normalization/index.html b/concepts/normalization/index.html index 4d50ffd6..b879dcad 100644 --- a/concepts/normalization/index.html +++ b/concepts/normalization/index.html @@ -6623,7 +6623,7 @@ - Counters + Counter @@ -6644,7 +6644,7 @@ - Distribution Summaries + Distribution Summary @@ -6665,7 +6665,7 @@ - Gauges + Gauge @@ -6686,7 +6686,7 @@ - Percentile Timers + Percentile Timer @@ -6707,7 +6707,7 @@ - Timers + Timer @@ -6841,7 +6841,7 @@ - Age Gauges + Age Gauge @@ -6862,7 +6862,7 @@ - Counters + Counter @@ -6883,7 +6883,7 @@ - Distribution Summaries + Distribution Summary @@ -6925,7 +6925,7 @@ - Max Gauges + Max Gauge @@ -6942,11 +6942,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6963,11 +6963,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6984,11 +6984,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7009,7 +7030,7 @@ - Timers + Timer diff --git a/concepts/time-series/index.html b/concepts/time-series/index.html index 458c4740..a4a3b255 100644 --- a/concepts/time-series/index.html +++ b/concepts/time-series/index.html @@ -6632,7 +6632,7 @@ - Counters + Counter @@ -6653,7 +6653,7 @@ - Distribution Summaries + Distribution Summary @@ -6674,7 +6674,7 @@ - Gauges + Gauge @@ -6695,7 +6695,7 @@ - Percentile Timers + Percentile Timer @@ -6716,7 +6716,7 @@ - Timers + Timer @@ -6850,7 +6850,7 @@ - Age Gauges + Age Gauge @@ -6871,7 +6871,7 @@ - Counters + Counter @@ -6892,7 +6892,7 @@ - Distribution Summaries + Distribution Summary @@ -6934,7 +6934,7 @@ - Max Gauges + Max Gauge @@ -6951,11 +6951,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6972,11 +6972,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6993,11 +6993,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7018,7 +7039,7 @@ - Timers + Timer diff --git a/getting-started/index.html b/getting-started/index.html index 404d1c25..6bc179b7 100644 --- a/getting-started/index.html +++ b/getting-started/index.html @@ -6628,7 +6628,7 @@ - Counters + Counter @@ -6649,7 +6649,7 @@ - Distribution Summaries + Distribution Summary @@ -6670,7 +6670,7 @@ - Gauges + Gauge @@ -6691,7 +6691,7 @@ - Percentile Timers + Percentile Timer @@ -6712,7 +6712,7 @@ - Timers + Timer @@ -6846,7 +6846,7 @@ - Age Gauges + Age Gauge @@ -6867,7 +6867,7 @@ - Counters + Counter @@ -6888,7 +6888,7 @@ - Distribution Summaries + Distribution Summary @@ -6930,7 +6930,7 @@ - Max Gauges + Max Gauge @@ -6947,11 +6947,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6968,11 +6968,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6989,11 +6989,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7014,7 +7035,7 @@ - Timers + Timer diff --git a/index.html b/index.html index 6a751afd..a9526157 100644 --- a/index.html +++ b/index.html @@ -6564,7 +6564,7 @@ - Counters + Counter @@ -6585,7 +6585,7 @@ - Distribution Summaries + Distribution Summary @@ -6606,7 +6606,7 @@ - Gauges + Gauge @@ -6627,7 +6627,7 @@ - Percentile Timers + Percentile Timer @@ -6648,7 +6648,7 @@ - Timers + Timer @@ -6782,7 +6782,7 @@ - Age Gauges + Age Gauge @@ -6803,7 +6803,7 @@ - Counters + Counter @@ -6824,7 +6824,7 @@ - Distribution Summaries + Distribution Summary @@ -6866,7 +6866,7 @@ - Max Gauges + Max Gauge @@ -6883,11 +6883,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6904,11 +6904,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6925,11 +6925,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6950,7 +6971,7 @@ - Timers + Timer diff --git a/overview/index.html b/overview/index.html index 1baf4c4e..427ef8a5 100644 --- a/overview/index.html +++ b/overview/index.html @@ -6727,7 +6727,7 @@ - Counters + Counter @@ -6748,7 +6748,7 @@ - Distribution Summaries + Distribution Summary @@ -6769,7 +6769,7 @@ - Gauges + Gauge @@ -6790,7 +6790,7 @@ - Percentile Timers + Percentile Timer @@ -6811,7 +6811,7 @@ - Timers + Timer @@ -6945,7 +6945,7 @@ - Age Gauges + Age Gauge @@ -6966,7 +6966,7 @@ - Counters + Counter @@ -6987,7 +6987,7 @@ - Distribution Summaries + Distribution Summary @@ -7029,7 +7029,7 @@ - Max Gauges + Max Gauge @@ -7046,11 +7046,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7067,11 +7067,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7088,11 +7088,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7113,7 +7134,7 @@ - Timers + Timer diff --git a/search/search_index.json b/search/search_index.json index ebf47a14..405722f5 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Atlas","text":"

    Atlas was developed by Netflix to manage dimensional time series data for near real-time operational insight. Atlas features in-memory data storage, allowing it to gather and report very large numbers of metrics, very quickly.

    Atlas captures operational intelligence. Whereas business intelligence is data gathered for analyzing trends over time, operational intelligence provides a picture of what is currently happening within a system.

    Atlas was built because the existing systems Netflix was using for operational intelligence were not able to cope with the increase in metrics we were seeing as we expanded our operations in the cloud. In 2011, we were monitoring 2 million metrics related to our streaming systems. By 2014, we were at 1.2 billion metrics and the numbers continue to rise. Atlas is designed to handle this large quantity of data and can scale with the hardware we use to analyze and store it.

    For details and background on the project please read through the overview page.

    Check out the getting started page for an introduction to using Atlas in the cloud environment. Once you've explored the example, check out the stack language references to see the various types of information you can access.

    "},{"location":"getting-started/","title":"Getting Started","text":"

    The instructions on this page are for quickly getting a sample backend server running on a local machine. For other common tasks see:

    "},{"location":"getting-started/#run-a-demo-instance","title":"Run a Demo Instance","text":"

    Prerequisites

    To quickly run a version with some synthetic sample data:

    $ curl -LO https://github.com/Netflix/atlas/releases/download/v1.7.8/atlas-standalone-1.7.8.jar\n$ java -jar atlas-standalone-1.7.8.jar\n
    "},{"location":"getting-started/#explore-available-tags","title":"Explore Available Tags","text":"

    The tags API is used to explore available tags and the relationships between them.

    # show all tags\n$ curl -s 'http://localhost:7101/api/v1/tags'\n\n# show all values of the name, nf.app and type tags\n$ curl -s 'http://localhost:7101/api/v1/tags/name'\n$ curl -s 'http://localhost:7101/api/v1/tags/nf.app'\n$ curl -s 'http://localhost:7101/api/v1/tags/type'\n\n# show all name tags that also have the type tag\n$ curl -s 'http://localhost:7101/api/v1/tags/name?q=type,:has'\n\n# show all name tags that have an nf.app tag with a value of nccp\n$ curl -s 'http://localhost:7101/api/v1/tags/name?q=nf.app,nccp,:eq'\n
    "},{"location":"getting-started/#generate-graphs","title":"Generate Graphs","text":"

    These graph API URLs show off a couple of the capabilities of the Atlas backend. See the Examples page for more detailed use cases.

    # graph all metrics with a name tag value of ssCpuUser, using an :avg aggregation\n$ curl -Lo graph.png 'http://localhost:7101/api/v1/graph?q=name,ssCpuUser,:eq,:avg'\n\n# duplicate the ssCpuUser signal, check if it is greater than 22.8 and display the result as a vertical span with 30% alpha\n$ curl -Lo graph.png 'http://localhost:7101/api/v1/graph?q=name,ssCpuUser,:eq,:avg,:dup,22.8,:gt,:vspan,30,:alpha'\n
    "},{"location":"getting-started/#running-demo-with-memory-storage","title":"Running Demo with Memory Storage","text":"

    Run an instance with a configuration to use the memory storage:

    $ curl -Lo memory.conf https://raw.githubusercontent.com/Netflix/atlas/v1.7.x/conf/memory.conf\n$ java -jar atlas-standalone-1.7.8.jar memory.conf\n

    Now we can send some data to it. To quickly get started there is a sample script to send in some data:

    $ curl -Lo publish-test.sh https://raw.githubusercontent.com/Netflix/atlas/v1.7.x/scripts/publish-test.sh\n$ chmod 755 publish-test.sh\n$ ./publish-test.sh\n

    Then view the data in a web browser:

    $ open 'http://localhost:7101/api/v1/graph?q=name,randomValue,:eq,:sum,(,name,),:by'\n
    "},{"location":"overview/","title":"Overview","text":"

    Atlas is the system Netflix uses to manage dimensional time-series data for near real-time operational insight. It was primarily created to address issues with scale and query capability in the previous system.

    "},{"location":"overview/#history","title":"History","text":"

    In May of 2011, Netflix was using a home-grown solution called Epic to manage time-series data. Epic was a combination of perl CGI scripts, RRDTool logging, and MySQL. We were tracking around 2M distinct time series and the monitoring system was regularly failing to keep up with the volume of data. In addition there were a number of trends in the company which presaged a drastic increase in metric volume:

    Since that time the metric volume has continued to grow quickly. The graph below shows the increase in metrics measured over last few years:

    The growth in raw volume required increased query capability to actually use the data.

    "},{"location":"overview/#goals","title":"Goals","text":"

    The main goals for Atlas were to build a system that provided:

    "},{"location":"overview/#common-api","title":"Common API","text":"

    Epic did a number of things really well that we didn't want to lose when transitioning. In particular:

    Many of these are capabilities that are provided by the RRDTool library Epic was using, but most alternatives we looked at fell short in these categories. In addition, we have uses for other 3rd party services like CloudWatch and it is desirable to have common query capability for that data.

    "},{"location":"overview/#scale","title":"Scale","text":"

    As indicated in the history section, metrics volume was growing and we needed a system that could keep up. For a long time our biggest concern was write volume, however, we also wanted to scale in terms of the amount of data we could read or aggregate as part of a graph request.

    "},{"location":"overview/#dimensionality","title":"Dimensionality","text":"

    This is a decision that was made because users were already doing it in ways that were hard to support. Epic only supported a simple name with some special case system dimensions of cluster and node. Many users were creating names like:

    com.netflix.eds.nccp.successful.requests.uiversion.nccprt-authorization.devtypid-101.clver-PHL_0AB.uiver-UI_169_mid.geo-US\n

    That breaks down to:

    Key Value name com.netflix.eds.nccp.successful.requests.uiversion nccprt authorization devtypid 101 clver PHL_0AB uiver UI_169_mid geo US

    Since it was all mangled into a name with different conventions by team, users would have to resort to complex regular expressions to slice and dice the data based on the dimensions.

    "},{"location":"overview/#query-layer","title":"Query Layer","text":"

    In order to get a common API, have flexibility for backend implementations, and provide merged views across backends we built a query layer that can be hierarchically composed. The diagram below shows the main Netflix setup:

    We have isolated regional deployments in each region we operate in as well as a global deployment that can combine the results from multiple regions. The query and aggregation operations can be performed on the fan out so most of the big summarization operations will distribute the computation across the tree and typically to an optimized storage layer at some point.

    Allowing the query and rendering layer to work on multiple backends also makes it easier for us to consider transitioning to other backends in the future such as OpenTSDB or InfluxDB. When switching to Atlas, one of the biggest hurdles was compatibility and transitioning to the new system.

    "},{"location":"overview/#stack-language","title":"Stack Language","text":"

    One of our key requirements was to be able to have deep links into a particular chart and to be able to reliably pass around or embed these images via email, wikis, html pages, etc. In addition, the user who receives the link should be able to tweak the result. Atlas uses a simple stack language that has a minimal punctuation and allows arbitrarily complex graph expressions to be encoded in a URL friendly way. This means that all images can be accessed using a GET request. The stack language is also simple to parse and interpret, allowing it to be easily consumed from a variety of tools. The core features include:

    "},{"location":"overview/#graph-example","title":"Graph Example","text":"

    To illustrate, this is a sample graph image:

    This graph shows the number of requests per second and compares that with a prediction line generated using double exponential smoothing. If the number of requests per second falls below the prediction, it indicates an alert would trigger using the vertical spans. The url to generate this image follows (newlines added for readability):

    http://atlas/api/v1/graph\n  ?tz=UTC\n  &e=2012-01-01T08:00\n  &s=e-8h\n  &w=500\n  &h=150\n  &l=0\n  &q=nf.cluster,alerttest,:eq,\n     name,requestsPerSecond,:eq,:and,\n     :sum,\n     :dup,10,0.1,0.02,:des,\n     0.85,:mul,\n     :2over,:lt,\n     :rot,$name,:legend,\n     :rot,prediction,:legend,\n     :rot,:vspan,60,:alpha,alert+triggered,:legend\n

    Adding some comments to the stack expression to explain a bit what is going on:

    # Query to generate the input line\nnf.cluster,alerttest,:eq,\nname,requestsPerSecond,:eq,:and,\n:sum,\n\n# Create a copy on the stack\n:dup,\n\n# Apply a DES function to generate a prediction\n# using the copy on the top of the stack. For\n# a description of the parameters see the DES\n# reference page.\n10,0.1,0.02,:des,\n\n# Used to set a threshold. The prediction should\n# be roughly equal to the line, in this case the\n# threshold would be 85% of the prediction.\n0.85,:mul,\n\n# Before              After\n# 4.                  4. actual\n# 3.                  3. prediction\n# 2. actual           2. actual\n# 1. prediction       1. prediction\n:2over,\n\n# Create a boolean signal line that is 1\n# for datapoints where the actual value is\n# less than the prediction and 0 where it\n# is greater than or equal the prediction.\n# The 1 values are where the alert should\n# trigger.\n:lt,\n\n# Apply presentation details.\n:rot,$name,:legend,\n:rot,prediction,:legend,\n:rot,:vspan,60,:alpha,alert+triggered,:legend\n

    See the stack language page for more information.

    "},{"location":"overview/#memory-storage","title":"Memory Storage","text":"

    Storage for Atlas has been a bit of a sore point. We have tried many backends and ended up moving more and more to a model where pretty much all data is stored in memory either in or off the java heap.

    "},{"location":"overview/#speed","title":"Speed","text":"

    The primary goal for Atlas is to support queries over dimensional time series data so we can slice and dice to drill down into problems. This means we frequently have a need to perform large aggregations that involve many data points even though the final result set might be small.

    As an example consider a simple graph showing the number of requests per second hitting a service for the last 3 hours. Assuming minute resolution that is 180 datapoints for the final output. On a typical service we would get one time series per node showing the number of requests so if we have 100 nodes the intermediate result set is around 18k datapoints. For one service users went hog wild with dimensions breaking down requests by device (~1000s) and country (~50) leading to about 50k time series per node. If we still assume 100 nodes that is about 900M datapoints for the same 3h line.

    Though obviously we have to be mindful about the explosion of dimensions, we also want that where possible to be a decision based on cost and business value rather than a technical limitation.

    "},{"location":"overview/#resilience","title":"Resilience","text":"

    What all has to be working in order for the monitoring system to work? If it falls over what is involved in getting it back up? Our focus is primarily operational insight so the top priority is to be able to determine what is going on right now. This leads to the following rules of thumb:

    As a result the internal Atlas deployment breaks up the data into multiple windows based on the window of data they contain.

    With this setup we can show the last 6h of data as long as clients can successfully publish. The data is all in memory sharded across machines in the 6h clusters. Because the data and index are all in memory on the local node each instance is self-contained and doesn't need any external service to function. We typically run multiple mirrors of the 6h cluster so data is replicated and we can handle loss of an instance. In AWS we run each mirror in a different zone so that a zone failure will only impact a single mirror.

    The publish cluster needs to know all the instances in the mirror cluster and takes care of splitting the traffic up, so it goes to the correct shard. The set of mirror instances and shards are assigned based on slots from the Edda autoScalingGroups API. Since the set of instances for the mirrors change rarely, the publish instances can cache the Edda response and still successfully publish most data if Edda fails. If an instance is replaced and we can't update data we would have partial loss for a single shard if the same shard was missing in another mirror.

    Historical data can also fail in which case graphs would not be able to show data for some older windows. This doesn't have to be fully continuous, for example a common use case for us is to look at week-over-week (WoW) charts even though the span of the chart might only be a few hours. If the < 4d cluster fails but the < 16d cluster is functioning we could still serve that graph even though we couldn't show a continuous graph for the full week. A graph would still be shown but would be missing data in the middle.

    After data is written to the mirrors, they will flush to a persistence layer that is responsible for writing the data to the long term storage in S3. The data at full resolution is kept in S3 and we use hadoop (Elastic MapReduce) for processing the data to perform corrective merging of data from the mirrors, generate reports, and perform rollups into a form that can be loaded into the historical clusters.

    "},{"location":"overview/#cost","title":"Cost","text":"

    Keeping all data in memory is expensive, in particular with the large growth rate of data. The combination of dimensionality and time based partitioning used for resilience also gives us a way to help manage costs.

    The first way is in controlling the number of replicas. In most cases we are using replicas for redundancy not to provide additional query capacity. For historical data that can be reloaded from stable storage we typically run only one replica as the duration of partial downtime was not deemed to be worth the cost for an additional replica.

    The second way is as part of the hadoop processing we can compute rollups so that we have a much smaller data volume to load in historical clusters. At Netflix the typical policy is roughly:

    Cluster Policy < 6h Keeps all data received < 4d ago Keeps most data, we do early rollup by dropping the node dimension on some business metrics < 16d ago Rollup by dropping the node dimension on all metrics older Explicit whitelist, typically recommend BI systems for these use-cases

    Using these policies we get greatly reduced index sizes for the number of distinct time series despite a significant amount of churn. With auto-scaling and red/black deployment models the set of instances change frequently so typically the intersection of distinct time series from one day to the next is less than 50%. Rollups target the dimensions which lead to that churn giving us much smaller index sizes. Also, in many cases dimensions like node that lead to this increase become less relevant after the node goes away. Deep-dive or investigative use-cases can still access the data using hadoop if needed.

    Snapshot of index sizes for one region in our environment:

    < 6h < 4d < 16d"},{"location":"overview/#ecosystem","title":"Ecosystem","text":"

    Internally there is a lot of tooling and infrastructure built up around Atlas. We are planning to open source many of these tools as time permits. This project is the first step for that with the query layer and some of the in-heap memory storage. Some additional parts that should come in the future:

    These projects were originally developed and run internally and thus only needed to be setup by our team and assume many internal infrastructure pieces to run. There is a goal to try and make this easier, but it will take some time.

    "},{"location":"api/tags/","title":"Tags","text":"

    This page is a reference for the tags API provided by Atlas.

    "},{"location":"api/tags/#uri","title":"URI","text":"

    /api/v1/tags?q=<expr>&[OPTIONS]

    "},{"location":"api/tags/#query-parameters","title":"Query Parameters","text":""},{"location":"api/tags/#callback-callback","title":"Callback (callback)","text":"

    If the format is json, the callback is used for providing JSONP output. This parameter is ignored for all other formats.

    "},{"location":"api/tags/#format-format","title":"Format (format)","text":"

    Specifies the output format to use. The default is json.

    Value Description json Outputs the graph data as a JSON object. txt Uses mime-type text/plain so it will render in the browser."},{"location":"api/tags/#limit-limit","title":"Limit (limit)","text":"

    Maximum number of results to return before paging the response. If the response is paged, an x-nflx-atlas-next-offset header will be set to indicate the next offset. Pass the value with an offset param to get the next part of the list. If the header is not present there is no more data.

    "},{"location":"api/tags/#offset-offset","title":"Offset (offset)","text":"

    If the response is paged this param is used to indicate where the next request should pick up from.

    "},{"location":"api/tags/#query-q","title":"Query (q)","text":"

    Query expression used to select a set of metrics and manipulate them for presentation in a graph. The query expression can use query and std commands described in the reference.

    "},{"location":"api/time-parameters/","title":"Time Parameters","text":"

    APIs that accept time ranges support three parameters:

    1. Start time (s)
    2. End time (e)
    3. Time zone (tz)
    "},{"location":"api/time-parameters/#time-zone","title":"Time Zone","text":"

    Time zone can be any valid time zone id string.

    "},{"location":"api/time-parameters/#time","title":"Time","text":""},{"location":"api/time-parameters/#absolute-times","title":"Absolute Times","text":"

    Absolute times can be specified by name or as a timestamp.

    "},{"location":"api/time-parameters/#named-times","title":"Named Times","text":"

    Named times are references that will get resolved to a timestamp when a query is executed. For example, with graphs it is common to set the end time to now.

    Name Description s User specified start time. Can only be used as part of the end parameter. e User specified end time. Can only be used as part of the start parameter. now Current time. epoch January 1, 1970 UTC."},{"location":"api/time-parameters/#timestamps","title":"Timestamps","text":"

    Explicit timestamps can use the following formats:

    Format Description %Y-%m-%d Date using the timezone for the query. The time will be 00:00. %Y-%m-%dT%H:%M Date time using the timezone for the query. The seconds will be 00. %Y-%m-%dT%H:%M:%S Date time using the timezone for the query. %s Seconds since January 1, 1970 UTC. %s (ms) Milliseconds since January 1, 1970 UTC.

    For times since the epoch both seconds and milliseconds are supported because both are in common use and it helps to avoid confusion when copy and pasting from another source. Values less than or equal to 2,147,483,648 (2³¹) will be treated as a timestamp in seconds. Values above that will be treated as a timestamp in milliseconds. So times from the epoch to 1970-01-25T20:31:23 cannot be represented in the millisecond form. In practice, this limitation has not been a problem.

    The first three formats above can also be used with an explicit time zone.

    "},{"location":"api/time-parameters/#zone-offsets","title":"Zone Offsets","text":"

    An explicit time zone can be specified as Z to indicate UTC or by using an offset in hours and minutes. For example:

    2012-01-12T01:37Z\n2012-01-12T01:37-00:00\n2012-01-12T01:37-07:00\n2012-01-12T01:37-07:42\n

    A common format recommended for logs at Netflix is an ISO timestamp in UTC:

    2012-01-12T01:37:27Z\n

    These can be copy and pasted to quickly check a graph for a timestamp from a log file. For practical purposes in Atlas a -00:00 offset timezone can be thought of as UTC, but depending on the source may have some additional meaning.

    "},{"location":"api/time-parameters/#relative-times","title":"Relative Times","text":"

    Relative times consist of a named time used for an anchor and an offset duration.

    <named-time> '-' <duration>\n<named-time> '+' <duration>\n

    For example:

    Pattern Description now-1w One week ago. e-1w One week before the end time. s+6h Six hours after the start time. s+P2DT6H5M Two days, 6 hours, and 5 minutes after the start time."},{"location":"api/time-parameters/#durations","title":"Durations","text":""},{"location":"api/time-parameters/#duration-vs-period","title":"Duration vs Period","text":"

    This section is using the definition of duration and period from the java time libraries. In short:

    The offset used for relative times in Atlas are durations because:

    "},{"location":"api/time-parameters/#simple-duration","title":"Simple Duration","text":"

    A simple offset uses a positive integer followed by one of these units:

    All durations are a fixed number of seconds. A day is 24 hours, week is 7 days, month is 30 days, and a year is 365 days.

    "},{"location":"api/time-parameters/#iso-duration","title":"ISO Duration","text":"

    The duration can also be specified as an ISO duration string, but day (D) is the largest part that can be used within the duration. Others such as week (W), month (M), and year (Y) are not supported. Examples:

    Pattern Description P1D One day of exactly 24 hours. P1DT37M One day and 37 minutes. PT5H6M Five hours and six minutes.

    For more details see docs on parsing durations.

    "},{"location":"api/graph/anonymization/","title":"Anonymization","text":"

    Occasionally it is useful to show a graph, but the exact values need to be suppressed. This can be useful for communicating with external support or including in a presentation. To avoid showing the actual values disable tick labels using tick_labels=off and either disable the legend or disable the legend stats.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend_stats=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n  &tick_labels=off\n

    If you also want to suppress the time axis, then use the only_graph option:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &only_graph=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n

    "},{"location":"api/graph/axis-bounds/","title":"Axis Bounds","text":"

    The upper and lower bounds for an axis can be set to an explicit floating point value or:

    When selecting bounds it is important to think about how it can impact the perception of what is shown. Automatic bounds can be useful for zooming in on the data, but can also lead to mis-perceptions for someone quickly scanning a dashboard. Consider these two graphs showing percent CPU usage on an instance:

    Automatic Bounds Explicit Bounds

    The automatic bounds allows us to see much more detail, but could lead a casual observer to think there were frequent large spikes in CPU usage rather than just noise on a machine with very little load.

    See Tick Labels for information on Y axis label formatting and suffix information.

    "},{"location":"api/graph/axis-bounds/#default-lower","title":"Default Lower","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by\n
    "},{"location":"api/graph/axis-bounds/#default-lower-stack","title":"Default Lower Stack","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by,\n    :stack\n
    "},{"location":"api/graph/axis-bounds/#default-upper","title":"Default Upper","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by,\n    :neg\n
    "},{"location":"api/graph/axis-bounds/#default-upper-stack","title":"Default Upper Stack","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by,\n    :neg,\n    :stack\n
    "},{"location":"api/graph/axis-bounds/#explicit-bounds","title":"Explicit Bounds","text":"

    Note the &l=0 and &u=60e3 parameters.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &l=0\n  &u=60e3\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by\n

    Note

    It is possible to define the boundaries beyond the range of the data source so that a graph appears empty.

    "},{"location":"api/graph/axis-bounds/#auto-lower","title":"Auto Lower","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &l=auto-data\n  &\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by,\n    :stack\n
    "},{"location":"api/graph/axis-bounds/#auto-upper","title":"Auto Upper","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &u=auto-data\n  &\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by,\n    :stack\n
    "},{"location":"api/graph/axis-scale/","title":"Axis Scale","text":"

    Scales determine how the data value for a line will get mapped to the Y-Axis. There are currently five scales that can be used for an axis:

    See Tick Labels for information on Y axis label formatting and suffix information.

    "},{"location":"api/graph/axis-scale/#linear","title":"Linear","text":"

    A linear scale uniformly maps the input values (domain) to the Y-axis location (range). If v is a datapoint in a time series, then y=m*v+b where m and b are automatically chosen based on the domain and range.

    This is the default scale for an axis and will get used if no explicit scale is set. Since 1.6, it can also be used explicitly:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    minuteOfHour,:time,\n    1e3,:add,\n    minuteOfHour,:time\n  &scale=linear\n

    "},{"location":"api/graph/axis-scale/#logarithmic","title":"Logarithmic","text":"

    A logarithmic scale emphasizes smaller values when mapping the input values (domain) to the Y-axis location (range). This is often used if two lines with significantly different magnitudes are on the same axis. If v is a datapoint in a time series, then y=m*log(v)+b where m and b are automatically chosen based on the domain and range. In many cases, using a separate Y-axis can be a better option that doesn't distort the line as much.

    To use this mode, add scale=log (prior to 1.6 use o=1).

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    minuteOfHour,:time,\n    1e3,:add,\n    minuteOfHour,:time\n  &scale=log\n

    "},{"location":"api/graph/axis-scale/#log-linear","title":"Log Linear","text":"

    Since 1.8.

    A logarithmic scale for powers of 10 with linear behavior between ticks. This is useful for heatmap views of percentile distributions. Note that unit suffixes change with this scale.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    minuteOfHour,:time,\n    1e3,:add,\n    minuteOfHour,:time\n  &scale=log-linear\n

    "},{"location":"api/graph/axis-scale/#power-of-2","title":"Power of 2","text":"

    Since 1.6.

    A power scale that emphasizes larger values when mapping the input values (domain) to the Y-axis location (range). If v is a datapoint in a time series, then y=m*v^2+b where m and b are automatically chosen based on the domain and range. To emphasize smaller values see the square root scale.

    To use this mode, add scale=pow2.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    minuteOfHour,:time,\n    1e3,:add,\n    minuteOfHour,:time\n  &scale=pow2\n

    "},{"location":"api/graph/axis-scale/#square-root","title":"Square Root","text":"

    Since 1.6.

    A power scale that emphasizes smaller values when mapping the input values (domain) to the Y-axis location (range). If v is a datapoint in a time series, then y=m*v^0.5+b where m and b are automatically chosen based on the domain and range. To emphasize larger values see the power of 2 scale.

    To use this mode, add scale=sqrt.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    minuteOfHour,:time,\n    1e3,:add,\n    minuteOfHour,:time\n  &scale=sqrt\n

    "},{"location":"api/graph/basics/","title":"Basics","text":"

    This section gives some examples to get started quickly creating simple graphs.

    "},{"location":"api/graph/basics/#single-line","title":"Single Line","text":"

    The only required parameter is q which specifies the query expression for a line. The other two common parameters are for setting the start time, s, and the end time, e, for the data being shown. Usually the start time will be set relative to the end time, such as e-3h, which indicates 3 hours before the end time. See time parameters for more details on time ranges.

    Putting it all together:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq\n  &s=e-2d\n

    The resulting PNG plot displays time along the X axis, automatically scaled to the proper time range. The Y axis labels are scaled using metric prefixes to show the measured value. A legend is displayed under the plot with the name(s) of the expression results and a set of statistics computed on the plotted data for the time window. The small text at the very bottom reflect query parameters and step size along with some processing statistics.

    "},{"location":"api/graph/basics/#adding-a-title","title":"Adding a Title","text":"

    The graph title can be set using the title parameter. Similarly, a Y-axis label can be set using the ylabel parameter.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq\n  &s=e-2d\n  &title=Starts+Per+Second\n  &ylabel=sps\n

    "},{"location":"api/graph/basics/#multiple-lines","title":"Multiple Lines","text":"

    Multiple expressions can be placed on a chart by concatenating the expressions, e.g., showing a query expression along with a constant value:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    500e3\n  &s=e-2d\n

    "},{"location":"api/graph/basics/#group-by","title":"Group By","text":"

    Multiple lines can also be a result of a single expression via group by.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-2d\n

    "},{"location":"api/graph/basics/#simple-math","title":"Simple Math","text":"

    A number of operators are provided to manipulate a line. See the math section of the stack language tutorial for a complete list. Example that negates the value of a line:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    :neg\n  &s=e-2d\n

    Example that negates and then applies absolute value to get the original value back (since all values were positive in the input):

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    :neg,\n    :abs\n  &s=e-2d\n

    "},{"location":"api/graph/basics/#binary-operations","title":"Binary Operations","text":"

    Lines can be combined using binary math operators such as add or multiply. Example using divide:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    1000,:div\n  &s=e-2d\n

    If used with a group by, then either:

    "},{"location":"api/graph/basics/#both-sides-grouped","title":"Both Sides Grouped","text":"

    Dividing by self with both sides grouped:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :dup,\n    :div\n  &s=e-2d\n

    "},{"location":"api/graph/basics/#one-side-grouped","title":"One Side Grouped","text":"

    Dividing a grouped expression by a constant:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    1000,:div\n  &s=e-2d\n

    Equivalent to the previous expression, but the right-hand side is grouped and it uses multiply instead of divide:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    0.001,name,sps,:eq,\n    (,nf.cluster,),:by,\n    :mul\n  &s=e-2d\n

    "},{"location":"api/graph/color-palettes/","title":"Color Palettes","text":"

    The following color palettes are supported:

    There is also a hashed selection mode that can be used so that a line with a given label will always get the same color.

    "},{"location":"api/graph/color-palettes/#armytage","title":"Armytage","text":"

    This is the default color palette, it comes from the paper A Colour Alphabet and the Limits of Colour Coding by Paul Green-Armytage. Two colors, Xanthin and Yellow, are excluded because users found them hard to distinguish from a white background when used for a single pixel line. So overall there are 24 distinct colors with this palette.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=armytage\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#epic","title":"Epic","text":"

    This is a legacy palette that alternates between shades of red, green, and blue. It is supported for backwards compatibility, but not recommended.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=epic\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#blues","title":"Blues","text":"

    Shades of blue.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=blues\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#greens","title":"Greens","text":"

    Shades of green.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=greens\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#oranges","title":"Oranges","text":"

    Shades of orange.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=oranges\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#purples","title":"Purples","text":"

    Shades of purple.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=purples\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#reds","title":"Reds","text":"

    Shades of red.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=reds\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#custom","title":"Custom","text":"

    A custom color palette can be provided for a graph by using a list of comma separated hex color values following the ASL list format (,HEX,HEX,HEX,). This is mainly used to customize the colors for the result of a group by where you cannot set the color for each line using the list.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=(,1a9850,91cf60,d9ef8b,fee08b,fc8d59,d73027,)\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#hashed-selection","title":"Hashed Selection","text":"

    Any of the palettes above can be prefixed with hash: to select the color using a hashing function on the label rather than picking the next color from the list. The primary advantage is that the selected color will always be the same for a given label using a particular palette. However, some nice properties of the default mode are lost:

    The table below illustrates the difference by adding some additional lines to a chart for the second row:

    armytage hash:armytage

    Example:

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=hash:armytage\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :stack\n  &tz=UTC\n

    "},{"location":"api/graph/examples/","title":"Examples","text":"

    Browse the sidebar to get a good overview of graph options. It is recommended to at least go through the basics section. There is also a quick visual index below:

    Line Area Stack Stacked Percent

    VSpan Transparency Line Width Palettes

    Bounds Scales Multi Y Time Zones

    "},{"location":"api/graph/graph/","title":"Introduction","text":"

    The Graph API is the primary means to retrieve data from an Atlas store.

    The default response is a PNG image plotting data matching the Atlas Stack Language expression along with optional parameters to control time ranges, size, style, labels, etc. For a quick overview by example see the examples page.

    If graphs look familiar, that's because the design and language were inspired by RRDtool. RRD style graphs offer concise and highly customizable views of time series data. While a number of observability tools offer dynamic charts, a major benefit of these PNG graphs is the ability to snapshot data in time, particularly when that data may expire from a high throughput data store; PNGs are forever. Additionally, the majority of email and on-call systems support images out of the box without having to worry about porting a dynamic graphing library to various browsers and clients.

    The API only supports HTTP query strings at this time. This makes it easy to construct queries with tooling and share the URIs with other users. No JSON request payloads needed.

    Additional Output formats, including JSON, can be found in Outputs.

    "},{"location":"api/graph/graph/#uri","title":"URI","text":"

    /api/v1/graph?q=<expr>[&OPTIONS]

    "},{"location":"api/graph/graph/#http-method","title":"HTTP Method","text":"

    GET - Only the GET method is allowed at this time.

    "},{"location":"api/graph/graph/#query-parameters","title":"Query Parameters","text":""},{"location":"api/graph/graph/#data","title":"Data","text":"

    The only required query param is q which is the query expression used by the user to select and manipulate data. The simplest API query you can make is /api/v1/graph?q=42. This will produce a graph from Atlas with a straight line having a value of 42 for 3 hours* with a legend including statistics for the query period.

    All query params related to fetching data:

    Name Description Default Type q Query expression must be specified by user expr step Step size for data auto duration

    Warning

    In most cases users should not set step directly. The step parameter is deprecated.

    "},{"location":"api/graph/graph/#time","title":"Time","text":"

    There are three parameters to control the time range used for a graph:

    Name Description Default Type s Start time e-3h* Time e End time now* Time tz Time zone US/Pacific* Time zone ID

    For more information on the behavior see the time parameters page.

    "},{"location":"api/graph/graph/#image-flags","title":"Image Flags","text":"Name Description Default Type title Set the graph title no title String no_legend Suppresses the legend 0 boolean no_legend_stats Suppresses summary stats for the legend 0 boolean axis_per_line Put each line on a separate Y-axis 0 boolean only_graph Only show the graph canvas 0 boolean vision Simulate different vision types normal vision type"},{"location":"api/graph/graph/#image-size","title":"Image Size","text":"

    There are four parameters to control the image size and layout used for a graph:

    Name Description Default Type layout Mode for controlling exact or relative sizing canvas layout mode w Width of the canvas or image 700* int h Height of the canvas or image 300* int zoom Transform the size by a zoom factor 1.0 float

    For more information on the behavior see the graph layout page.

    "},{"location":"api/graph/graph/#y-axis","title":"Y-Axis","text":"Name Description Default Type stack Set the default line style to stack 0 boolean l Lower bound for the axis auto-style axis bound u Upper bound for the axis auto-style axis bound ylabel Label for the axis no label String palette Color palette to use armytage palette o Use a logarithmic scale (deprecated in 1.6) 0 boolean scale Set the axis scale to use (since 1.6) linear scale tick_labels Set the mode to use for tick labels decimal tick label mode sort Set the mode to use for sorting the legend expr order sort mode order Set the order ascending or descending for the sort asc order"},{"location":"api/graph/graph/#output-format","title":"Output Format","text":"Name Description Default Type format Output format to use png output format callback Method name to use for JSONP callback none String"},{"location":"api/graph/graph/#defaults","title":"Defaults","text":"

    If marked with an * the default shown can be changed by the administrator for the Atlas server. As a result the default in the table may not match the default you see. The defaults listed do match those used for the primary Atlas backends in use at Netflix.

    For users running their own server, the config settings and corresponding query params are:

    Key Query Param atlas.webapi.graph.start-time s atlas.webapi.graph.end-time e atlas.webapi.graph.timezone tz atlas.webapi.graph.width w atlas.webapi.graph.height h atlas.webapi.graph.palette palette"},{"location":"api/graph/graph/#boolean-flags","title":"Boolean Flags","text":"

    Flags with a true or false value are specified using 1 for true and 0 for false.

    "},{"location":"api/graph/heatmap/","title":"Heatmap","text":"

    Atlas primarily supports visualizing data in line charts. As of 1.8, Atlas can also visualize via heatmaps using the :heatmap line style. The graph area is broken up into a series of cells and a count for each cell is incremented when a measurement falls within the cell's boundaries. Colors or shades of colors then fill in cells based on the final count.

    "},{"location":"api/graph/heatmap/#percentiles","title":"Percentiles","text":"

    Heatmaps are particularly useful on top of percentile metrics to analyze the entire measurement range.

    Note Using the log linear scale will help to highlight clustered regions of measurements via &scale=log-linear. The example also uses data not available in the demo Atlas instance.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,requestLatency,:eq,\n    :percentile-heatmap,\n    \n  &scale=log-linear\n

    "},{"location":"api/graph/heatmap/#bounds","title":"Bounds","text":"

    The &heatmap_l= and &heatmap_u parameters can be used to narrow the range of cells displayed in a heatmap. Heatmap bounds act on the count of measurements in a cell and the palette colors or shades chosen. Depending on the bound limits, some cells may appear empty.

    No Heatmap BoundsWith Bounds (&heatmap_l=1.2&heatmap_u=1.3)
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"api/graph/heatmap/#palette","title":"Palette","text":"

    The palette used for filling heatmap cells can be changed via the &heatmap_palette= parameter. By default, a color is chosen from the global palette (based on whether the heatmap is the first or a later expression). A gradient is then applied to that color with a lighter gradient representing smaller cell counts and darker representing larger counts.

    Default PaletteReds Palette (&heatmap_palette=reds)
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"api/graph/heatmap/#custom-palette","title":"Custom Palette","text":"

    A custom palette may be provided by listing the hex colors to use in descending order, meaning the color to use for the highest cell counts must appear first.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :heatmap\n  &heatmap_palette=(,8cd1b9,46adbc,2a91b8,1978b3,335ca9,413e95,361566,)\n

    For further information, see Custom Color Palettes.

    "},{"location":"api/graph/heatmap/#order-of-expressions","title":"Order of Expressions","text":"

    When overlaying expressions with a heatmap and using the default palette, the order of expressions determines the color gradient used for cells. For example, if the heatmap expression is second in the query, the second palette color will be used as the gradient:

    Heatmap FirstHeatmap Second
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap,\nname,sps,:eq,\n\n
    name,sps,:eq,\nname,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"api/graph/heatmap/#label","title":"Label","text":"

    The label for the heatmap can be changed via the &heatmap_label= parameter. By default, the label is simply heatmap.

    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"api/graph/heatmap/#scale","title":"Scale","text":"

    Similar to axis scales, the scale of the heatmap cell colors (or gradients) can be adjusted using the &heatmap_scale= parameter. By default, the scale is linear though any of the valid scales may be used.

    LinearLog Linear
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"api/graph/layout/","title":"Layout","text":"

    The diagram below shows the parts of an Atlas graph and will be used when describing the behavior for various options.

    The layout for graph images is trying to accomplish two main goals:

    "},{"location":"api/graph/layout/#usable-canvas-size","title":"Usable Canvas Size","text":"

    Keep the canvas usable regardless of the number of lines, axes, etc that are competing for space. For example, the canvas area should not become too small due to the number of lines on the chart.

    Good Layout Poor Layout"},{"location":"api/graph/layout/#canvas-alignment","title":"Canvas Alignment","text":"

    Make it easy to align the canvas portion of several graphs on an html page. This is important because it makes it easier to find visual correlations between multiple graphs on a dashboard.

    In particular if arranged in a grid with the image in the top left of each cell, then the canvas should line up vertically for columns:

    And horizontally for rows:

    In the graph layout diagram at the top, this is why variable components such as multi axes, legend entries, and warnings are positioned on either the right side or the bottom of the canvas.

    "},{"location":"api/graph/layout/#modes","title":"Modes","text":"

    There are four supported layout modes that can be used with the layout query parameter:

    "},{"location":"api/graph/layout/#examples","title":"Examples","text":""},{"location":"api/graph/layout/#canvas","title":"Canvas","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &h=175\n  &layout=canvas\n  &q=\n    name,sps,:eq,\n    :sum,\n    (,nf.cluster,),:by\n  &s=e-1d\n  &tz=UTC\n  &w=400\n
    "},{"location":"api/graph/layout/#image","title":"Image","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &h=175\n  &layout=image\n  &q=\n    name,sps,:eq,\n    :sum,\n    (,nf.cluster,),:by\n  &s=e-1d\n  &tz=UTC\n  &w=400\n
    "},{"location":"api/graph/layout/#image-width","title":"Image Width","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &h=175\n  &layout=iw\n  &q=\n    name,sps,:eq,\n    :sum,\n    (,nf.cluster,),:by\n  &s=e-1d\n  &tz=UTC\n  &w=400\n
    "},{"location":"api/graph/layout/#image-height","title":"Image Height","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &h=175\n  &layout=ih\n  &q=\n    name,sps,:eq,\n    :sum,\n    (,nf.cluster,),:by\n  &s=e-1d\n  &tz=UTC\n  &w=400\n
    "},{"location":"api/graph/legends/","title":"Legends","text":"

    Options for adjusting legend:

    "},{"location":"api/graph/legends/#automatic","title":"Automatic","text":"

    If no explicit legend is specified, then the system will generate an automatic legend that summarizes the expression. There is no particular guarantee about what it will contain and in some cases it is difficult to generate a usable legend automatically. Example:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    hourOfDay,:time,\n    100,:mul,\n    minuteOfHour,:time,\n    :add\n  &s=e-1w\n

    "},{"location":"api/graph/legends/#explicit","title":"Explicit","text":"

    The legend for a line can be explicitly set using the :legend operator.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    hourOfDay,:time,\n    100,:mul,\n    minuteOfHour,:time,\n    :add,\n    time+value,:legend\n  &s=e-1w\n

    "},{"location":"api/graph/legends/#variables","title":"Variables","text":"

    Tag keys can be used as variables to plug values into the legend. This is useful when working with group by operations to customize the legend for each output. The variable can be expressed as a $ followed by the tag key if it is the only part of the legend:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    $nf.cluster,:legend\n  &s=e-1w\n

    Or as $( the tag key and a closing ) if combined with other text:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    cluster+$(nf.cluster)+sps,:legend\n  &s=e-1w\n

    "},{"location":"api/graph/legends/#disable","title":"Disable","text":"

    Legends can be disabled using the no_legend graph parameter.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n

    "},{"location":"api/graph/legends/#disable-stats","title":"Disable Stats","text":"

    You can also save vertical space and keep the legend by disabling the summary stats shown in the legend using the no_legend_stats graph parameter.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend_stats=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n

    "},{"location":"api/graph/legends/#sorting","title":"Sorting","text":"

    By default the legend for an axis will be ordered based on the order of the expressions on the stack. If an expression results in multiple lines, i.e. a group by, then they will be sorted by the label.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    120e3,threshold,:legend,\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-12h\n

    "},{"location":"api/graph/legends/#overall","title":"Overall","text":"

    To sort all lines on a given axis using a different mode use the sort URL parameter.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    120e3,threshold,:legend,\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-12h\n  &sort=max\n

    To change it to descending order use the order parameter, e.g.:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    120e3,threshold,:legend,\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-12h\n  &sort=max\n  &order=desc\n

    "},{"location":"api/graph/legends/#group-by-expression","title":"Group By Expression","text":"

    If more control is needed, then sorting can be applied to a particular group by expression. This can be useful for things like alerting visualizations where some common lines like the threshold and trigger indicator should be pinned to the top, but it is desirable to sort other results based on a stat like max. For example:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    120e3,threshold,:legend,\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :dup,\n    :max,\n    120e3,:gt,\n    30,:alpha,\n    :vspan,\n    trigger,:legend,\n    :swap,\n    max,:sort,\n    desc,:order,\n    $nf.cluster,:legend\n  &s=e-12h\n

    "},{"location":"api/graph/legends/#sorting-modes","title":"Sorting Modes","text":""},{"location":"api/graph/legends/#sorting-order","title":"Sorting Order","text":""},{"location":"api/graph/line-attributes/","title":"Line Attributes","text":"

    In addition to the line style and legend the following attributes can be adjusted:

    "},{"location":"api/graph/line-attributes/#color","title":"Color","text":"

    By default the color will come from the palette that is in use. However the color for a line can also be set explicitly using the :color operator:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    f00,:color\n  &s=e-1w\n

    Note, that for a group by all results will get the same attributes, so in this case all would end up being the same color:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    f00,:color\n  &s=e-1w\n

    "},{"location":"api/graph/line-attributes/#transparency","title":"Transparency","text":"

    The transparency of a line can be set using the :alpha operator or by explicitly setting the alpha channel as part of the color.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    :dup,\n    6h,:offset,\n    :area,\n    40,:alpha\n  &s=e-2d\n

    Setting the alpha explicitly as part of the color:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    :dup,\n    6h,:offset,\n    :area,\n    40ff0000,:color\n  &s=e-2d\n

    "},{"location":"api/graph/line-attributes/#line-width","title":"Line Width","text":"

    Adjust the stroke width used for a line:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    :dup,\n    6h,:offset,\n    3,:lw\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/","title":"Line Styles","text":"

    There are four line styles available:

    Multiple styles can be used in the same chart or combined with other operations.

    "},{"location":"api/graph/line-styles/#line","title":"Line","text":"

    The default style is line.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :line\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#area","title":"Area","text":"

    Area will fill the space between the line and 0 on the Y-axis. The alpha setting is just used to help visualize the overlap.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :area,\n    40,:alpha\n  &s=e-1w\n

    Similarly for negative values:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :neg,\n    :area,\n    40,:alpha\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#stack","title":"Stack","text":"

    Stack is similar to area, but will stack the filled areas on top of each other.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :stack\n  &s=e-1w\n

    Similarly for negative values:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :neg,\n    :stack\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#stacked-percentage","title":"Stacked Percentage","text":"

    The stack style can be combined with the :pct operator to get a stacked percentage chart for a group by:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :pct,\n    :stack\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#heatmap","title":"Heatmap","text":"

    Since 1.8.

    Plotting many time series with a heat map can be useful for identifying concentrations of measurements where individual lines may produce too much noise.

    See Heatmap for more details.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :heatmap\n

    "},{"location":"api/graph/line-styles/#vertical-span","title":"Vertical Span","text":"

    The vertical span style converts non-zero to spans. This is often used to highlight some portion of another line.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    50e3,:gt,\n    :vspan\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#combinations","title":"Combinations","text":"

    Line styles can be combined, e.g., to highlight the portion of a line that is above a threshold:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    :dup,\n    50e3,:gt,\n    :vspan,\n    40,:alpha,\n    50e3\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#layering","title":"Layering","text":"

    The z-order is based on the order of the expression on the stack.

    /api/v1/graph?\n  e=2015-03-10T13:13\n  &no_legend=1\n  &q=\n    t,name,sps,:eq,\n    :sum,\n    :set,\n    t,:get,\n    :stack,\n    t,:get,\n    1.1,:mul,\n    6h,:offset,\n    t,:get,\n    4,:div,\n    :stack\n  &s=e-2d\n

    "},{"location":"api/graph/multi-y/","title":"Multi Y Axis","text":"

    Examples for using multiple Y-axes:

    "},{"location":"api/graph/multi-y/#explicit","title":"Explicit","text":"

    By default all lines will go on axis 0, the one on the left side. A different axis can be specified using the :axis operation.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    42,1,:axis\n

    "},{"location":"api/graph/multi-y/#explicit-bounds","title":"Explicit Bounds","text":"

    By default all axes will pick up axis settings with no qualifier:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &l=0\n  &q=\n    name,sps,:eq,\n    42,1,:axis\n

    Bounds and other axis settings can be set per axis, e.g., this graph moves the constant line for 42 to a separate axis and sets the lower bound to 0 via the &l.1=0 parameter. This would work as well for &u.1=100e3. Append the index after the l. or u. :

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &l.1=0\n  &q=\n    name,sps,:eq,\n    42,1,:axis\n

    "},{"location":"api/graph/multi-y/#axis-per-line","title":"Axis Per Line","text":"

    There is a convenience operation to plot each line on a separate axis.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &axis_per_line=1\n  &q=\n    name,sps,:eq,\n    nf.cluster,nccp-p,:re,\n    :and,\n    (,nf.cluster,),:by\n

    If there are too many lines and it would be over the max Y-axis limit, then a warning will be shown:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &axis_per_line=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n

    "},{"location":"api/graph/multi-y/#palettes","title":"Palettes","text":"

    The color of the first line on an axis will get used as the color of the axis. The intention is to make it easy to understand which axis a line is associated with, since in a static image dynamic cues like hover cannot be used. Generally it is recommended to only have one line per axis when using multi-Y. Example:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &l=01\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    minuteOfHour,:time,\n    1,:axis\n

    Though we recommend not using more than one line per axis with multi-Y, a color palette can be specified for a specific axis. This can be used to select shades of a color for an axis so it is still easy to visually associate which axis a line belongs to:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &l=01\n  &palette.0=reds\n  &palette.1=blues\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :stack,\n    minuteOfHour,:time,\n    1,:axis\n

    "},{"location":"api/graph/outputs/","title":"Output Formats","text":"

    The following output formats are supported by default for graphing:

    "},{"location":"api/graph/outputs/#png","title":"png","text":"

    This is the default and creates a PNG image for the graph. The mime type is image/png.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=png\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-3m\n  &tz=UTC\n

    "},{"location":"api/graph/outputs/#csv","title":"csv","text":"

    Comma separated value output. The mime type is text/csv.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=csv\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-5m\n  &tz=UTC\n
    \"timestamp\",\"hourOfDay\",\"minuteOfHour\",\"NaN\"\n2012-01-01T08:56:00Z,8.000000,56.000000,NaN\n2012-01-01T08:57:00Z,8.000000,57.000000,NaN\n2012-01-01T08:58:00Z,8.000000,58.000000,NaN\n2012-01-01T08:59:00Z,8.000000,59.000000,NaN\n2012-01-01T09:00:00Z,9.000000,0.000000,NaN\n
    "},{"location":"api/graph/outputs/#txt","title":"txt","text":"

    Same as csv except that the separator is a tab character instead of a comma. The mime type will be text/plain so it is more likely to render directly in the browser rather than trigger a download.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=txt\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-5m\n  &tz=UTC\n
    \"timestamp\" \"hourOfDay\" \"minuteOfHour\"  \"NaN\"\n2012-01-01T08:56:00Z    8.000000    56.000000   NaN\n2012-01-01T08:57:00Z    8.000000    57.000000   NaN\n2012-01-01T08:58:00Z    8.000000    58.000000   NaN\n2012-01-01T08:59:00Z    8.000000    59.000000   NaN\n2012-01-01T09:00:00Z    9.000000    0.000000    NaN\n
    "},{"location":"api/graph/outputs/#json","title":"json","text":"

    JSON output representing the data. Note that it is not standard json as numeric values like NaN will not get quoted.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=json\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-5m\n  &tz=UTC\n
    {\n  \"start\" : 1325408160000,\n  \"step\" : 60000,\n  \"legend\" : [ \"hourOfDay\", \"minuteOfHour\", \"NaN\" ],\n  \"metrics\" : [ {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"hourOfDay\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"minuteOfHour\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"NaN\"\n  } ],\n  \"values\" : [ [ 8.0, 56.0, NaN ], [ 8.0, 57.0, NaN ], [ 8.0, 58.0, NaN ], [ 8.0, 59.0, NaN ], [ 9.0, 0.0, NaN ] ],\n  \"notices\" : [ ]\n}\n
    "},{"location":"api/graph/outputs/#stdjson","title":"std.json","text":"

    Same as json except that numeric values which are not recognized by standard json will be quoted. The mime type is application/json.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=std.json\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-5m\n  &tz=UTC\n
    {\n  \"start\" : 1325408160000,\n  \"step\" : 60000,\n  \"legend\" : [ \"hourOfDay\", \"minuteOfHour\", \"NaN\" ],\n  \"metrics\" : [ {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"hourOfDay\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"minuteOfHour\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"NaN\"\n  } ],\n  \"values\" : [ [ 8.0, 56.0, \"NaN\" ], [ 8.0, 57.0, \"NaN\" ], [ 8.0, 58.0, \"NaN\" ], [ 8.0, 59.0, \"NaN\" ], [ 9.0, 0.0, \"NaN\" ] ],\n  \"notices\" : [ ]\n}\n
    "},{"location":"api/graph/outputs/#statsjson","title":"stats.json","text":"

    Provides the summary stats for each line, but not all of the data points. The mime type is application/json.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=stats.json\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-5m\n  &tz=UTC\n
    {\n  \"start\" : 1325408160000,\n  \"end\" : 1325408460000,\n  \"step\" : 60000,\n  \"legend\" : [ \"hourOfDay\", \"minuteOfHour\", \"NaN\" ],\n  \"metrics\" : [ {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"hourOfDay\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"minuteOfHour\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"NaN\"\n  } ],\n  \"stats\" : [ {\n    \"count\" : 5,\n    \"avg\" : 8.2,\n    \"total\" : 41.0,\n    \"max\" : 9.0,\n    \"min\" : 8.0,\n    \"last\" : 9.0\n  }, {\n    \"count\" : 5,\n    \"avg\" : 46.0,\n    \"total\" : 230.0,\n    \"max\" : 59.0,\n    \"min\" : 0.0,\n    \"last\" : 0.0\n  }, {\n    \"count\" : 0,\n    \"avg\" : NaN,\n    \"total\" : NaN,\n    \"max\" : NaN,\n    \"min\" : NaN,\n    \"last\" : NaN\n  } ],\n  \"notices\" : [ ]\n}\n
    "},{"location":"api/graph/tick/","title":"Tick Labels","text":"

    The following tick (Y axis numeric labels) modes are supported:

    "},{"location":"api/graph/tick/#decimal","title":"Decimal","text":"

    This is the default mode. Y-axis tick labels will be formatted using the metric prefix to indicate the magnitude for values that are greater than one thousand or less than one.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n  &tick_labels=decimal\n

    Really large values will fallback to scientific notation, e.g.:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    1e180,:mul\n  &s=e-1w\n  &tick_labels=decimal\n

    "},{"location":"api/graph/tick/#binary","title":"Binary","text":"

    For values such as memory sizes it is sometimes more convenient to view the label using a power of 1024 rather than a power of 1000. If the tick label mode is set to binary, then the IEC binary prefix will be used.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n  &tick_labels=binary\n

    "},{"location":"api/graph/tick/#duration","title":"Duration","text":"

    Since 1.7.1.

    Useful for timers or percentiles that measure latency, provides ticks with time unit suffix.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,requestLatency,:eq,\n    nf.node,wii-node,:eq,\n    :and\n  &tick_labels=duration\n

    "},{"location":"api/graph/tick/#off","title":"Off","text":"

    For presentations or sharing it is sometimes useful to anonymize the chart. One way of doing that is to disable the Y-axis labels by setting the tick label mode to off.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n  &tick_labels=off\n

    "},{"location":"api/graph/tick/#offset-labels","title":"Offset Labels","text":"

    In situations where a graph has very small changes in value that generate a significant number of digits per tick, ticks may be labeled with offsets in order to fit the labels in the layout. A base value is displayed at the bottom of the axis and positive or negative offsets from the base displayed next to the ticks.

    For example, if the amount of disk space used varies by 1 byte occasionally, the ticks will be labeled in increments of +1.0.

    Note

    It is possible for queries spanning different data sources to display offset labels due to differing schemes used to encode floating point values.

    If offsets are not desirable, try adjusting the y axis bounds.

    "},{"location":"api/graph/time-shift/","title":"Time Shift","text":"

    A common use-case is to compare a given line with a shifted line to compare week-over-week or day-over-day.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    nf.cluster,nccp-silverlight,:eq,\n    :and,\n    :sum,\n    :dup,\n    1w,:offset\n

    The $(atlas.offset) variable can be used to show the offset in a custom legend:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    nf.cluster,nccp-silverlight,:eq,\n    :and,\n    :sum,\n    :dup,\n    1w,:offset,\n    :list,\n    (,$nf.cluster+(offset=$atlas.offset),:legend,\n    ),:each\n

    "},{"location":"api/graph/time-zone/","title":"Time Zones","text":"

    Examples for specifying the time zone:

    "},{"location":"api/graph/time-zone/#single-zone","title":"Single Zone","text":"

    Most graphs will only show a single time zone. By default the zone is US/Pacific. To set to another zone such as UTC use the tz query parameter:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq\n  &tz=UTC\n

    "},{"location":"api/graph/time-zone/#multi-zone","title":"Multi Zone","text":"

    The tz parameter can be specified multiple times in which case one X-axis will be shown per zone. Start and end times will be based on the first time zone listed.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq\n  &s=e-2d\n  &tz=US/Eastern\n  &tz=US/Pacific\n  &tz=UTC\n

    "},{"location":"api/graph/time-zone/#daylight-savings-time","title":"Daylight Savings Time","text":"

    If using a time zone that changes for daylight savings time, then you will see duplicate or missing hours on the time axis labels during the transition period. For example, a duplicate hour:

    /api/v1/graph?\n  e=2015-11-01T08:00\n  &q=\n    name,sps,:eq\n  &s=e-12h\n  &tz=US/Pacific\n  &tz=UTC\n

    A missing hour:

    /api/v1/graph?\n  e=2015-03-08T08:00\n  &q=\n    name,sps,:eq\n  &s=e-12h\n  &tz=US/Pacific\n  &tz=UTC\n

    If looking at a longer time frame, then it can also throw off the alignment so ticks will not be on significant time boundaries, e.g.:

    /api/v1/graph?\n  e=2015-11-05T08:00\n  &q=\n    name,sps,:eq\n  &s=e-1w\n  &tz=US/Pacific\n  &tz=UTC\n

    "},{"location":"api/graph/vision/","title":"Color Blindness","text":"

    The vision parameter can be used to simulate different types of color blindness. Permitted values are:

    "},{"location":"api/graph/vision/#normal","title":"Normal","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=normal\n
    "},{"location":"api/graph/vision/#protanopia","title":"Protanopia","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=protanopia\n
    "},{"location":"api/graph/vision/#protanomaly","title":"Protanomaly","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=protanomaly\n
    "},{"location":"api/graph/vision/#deuteranopia","title":"Deuteranopia","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=deuteranopia\n
    "},{"location":"api/graph/vision/#deuteranomaly","title":"Deuteranomaly","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=deuteranomaly\n
    "},{"location":"api/graph/vision/#tritanopia","title":"Tritanopia","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=tritanopia\n
    "},{"location":"api/graph/vision/#tritanomaly","title":"Tritanomaly","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=tritanomaly\n
    "},{"location":"api/graph/vision/#achromatopsia","title":"Achromatopsia","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=achromatopsia\n
    "},{"location":"api/graph/vision/#achromatomaly","title":"Achromatomaly","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=achromatomaly\n
    "},{"location":"asl/","title":"Index","text":"

    The asl-finetuning.tsv file is a collection of ChatGPT training data manually extracted from the Markdown files in this section of the repo, that can be converted to a format suitable for helping ChatGPT learn about Atlas Stack Language.

    See the Fine-tuning section of the OpenAI documentation for more details.

    "},{"location":"asl/alerting-expressions/","title":"Alerting Expressions","text":"

    The stack language provides some basic techniques to convert an input line into a set of signals that can be used to trigger and visualize alert conditions. This section assumes a familiarity with the stack language and the alerting philosophy.

    "},{"location":"asl/alerting-expressions/#signal-line","title":"Signal Line","text":"

    A signal line is a time series that indicates whether or not a condition is true for a particular interval. They are modelled by having zero indicate false and non-zero, typically 1, indicating true. Alerting expressions map some input time series to a set of signal lines that indicate true when in a triggering state.

    "},{"location":"asl/alerting-expressions/#threshold-alerts","title":"Threshold Alerts","text":"

    To start we need an input metric. For this example the input will be a sample metric showing high CPU usage for a period:

    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and,\n:sum\n

    Lets say we want to trigger an alert when the CPU usage goes above 80%. To do that simply use the :gt operator and append 80,:gt to the query:

    The result is a signal line that is non-zero, typically 1, when in a triggering state and zero when everything is fine.

    "},{"location":"asl/alerting-expressions/#dampening","title":"Dampening","text":"

    Our threshold alert above will trigger if the CPU usage is ever recorded to be above the threshold. Alert conditions are often combined with a check for the number of occurrences. This is done by using the :rolling-count operator to get a line showing how many times the input signal has been true within a specified window and then applying a second threshold to the rolling count.

    InputRolling CountDampened Signal
    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and,\n:sum,\n80,:gt\n
    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and,\n:sum,\n80,:gt,\n5,:rolling-count\n
    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and,\n:sum,\n80,:gt,\n5,:rolling-count,\n4,:gt\n
    "},{"location":"asl/alerting-expressions/#visualization","title":"Visualization","text":"

    A signal line is useful to tell whether or not something is in a triggered state, but can be difficult for a person to follow. Alert expressions can be visualized by showing the input, threshold, and triggering state on the same graph.

    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and,\n:sum,\n80,:2over,\n:gt,\n:vspan,\n40,:alpha,\ntriggered,:legend,\n:rot,\ninput,:legend,\n:rot,\nthreshold,:legend,\n:rot\n
    "},{"location":"asl/alerting-expressions/#summary","title":"Summary","text":"

    You should now know the basics of crafting an alert expression using the stack language. Other topics that may be of interest:

    "},{"location":"asl/alerting-philosophy/","title":"Alerting Philosophy","text":"

    It is recommended for all alerts to adhere to the following guidelines:

    1. Keep conditions simple.
    2. Alerts should be actionable.
    3. Check for measured failure on critical paths rather than a lack of success.
    4. Alerts should not have special cases for routine maintenance.
    5. Consider how the alert check can fail.
    "},{"location":"asl/alerting-philosophy/#keep-it-simple","title":"Keep It Simple","text":"

    When an alert triggers, it should be easy to understand why. Similarly, if an alert doesn't fire, then it should be easy to check and see what happened. The more complicated an alert condition becomes, the harder it is to understand and debug.

    It is recommended to keep alert rules as a simple expression with a threshold and number of occurrences. An example of this is the following rule:

    CPU Usage > 80% for at least 5 minutes\n

    Multiple signals should only be combined if it improves the effectiveness of the alert. For example, what is an appropriate threshold for the number of requests that have error responses? What happens to that threshold if your cluster auto-scales? It is more effective to define the threshold as a percentage of total requests:

    (Num Errors / Num Total) > 0.01 for at least 5 minutes\n

    In some cases, a low volume can make the percentages less meaningful and result in false positives. For example, if your daily traffic pattern follows a sine curve, then the troughs may not represent a meaningful error percentage. Another example might be during failover exercises, if traffic has been failed over to another cluster. One way to compensate for this is to check the failure rate and overall volume:

    Percentage of Failures > X AND Volume > Y\n

    As a general rule, bias towards simplicity. If you are creating more complex expressions, then stop and think about why that complexity is needed. Are there other signals available that are easier to use? Can the application be changed so that it reports metrics which make it easier to diagnose?

    "},{"location":"asl/alerting-philosophy/#actionable-alerts","title":"Actionable Alerts","text":"

    If an alert fires and sends a notification to users, someone should be motivated to investigate the problem. Alerts that are noisy or not actionable train people to ignore or filter out alert notifications.

    For cases where the response to an alert can be automated, such as terminating a bad instance, it shouldn't send out a notification unless there is a failure to perform the action. If you want a summary of cluster health, then use dashboards or reporting tools for this function; don't attempt to do this via alert notifications.

    Alerts should check something important. To setup effective alerts, you need to understand the application and have ways to detect failures for critical functionality. Avoid general system-type alerts that won't be investigated. For example, should you alert on high CPU usage? If you have done squeeze testing and you have information to indicate how CPU usage impacts the application, then it can be useful and it will provide a way to know a problem is coming before it impacts clients of the service. If you do not have this knowledge, then your alert may be under-tuned, leading to noisy notifications that may be ignored.

    "},{"location":"asl/alerting-philosophy/#check-for-measured-failure","title":"Check for Measured Failure","text":"

    It is better to check for failures rather than trying to trigger based on an absence of information or a reduction in the amount of success.

    "},{"location":"asl/alerting-philosophy/#absence-of-information","title":"Absence of Information","text":"

    A typical example of this is a process that runs over a longer time period. For example, suppose we have an application that updates a metadata cache once per day and it takes an hour to refresh. It is not recommended to send an event on refresh success and then configure alerts based on the absence of the success event. Design the signals so you have a clear way to understand what error conditions may be occurring on and then alert if there is a problem.

    In this example, a better design would use a gauge that reports the loading time and a gauge that reports the age of the cache. You can then add alerts when the gauges for these error conditions exceed unacceptable thresholds.

    "},{"location":"asl/alerting-philosophy/#reduction-in-success","title":"Reduction in Success","text":"

    Let's say we have a server that is taking traffic and we want to know if users are experiencing problems. How should we go about this? It is often tempting to look for things like a drop in the number of successful requests, because this can be a generic catch-all for many types of problems.

    However, alerts of this sort are inherently noisy. How do you know what the number of requests should be? While there are various schemes for trying to predict the behavior, you will spend a lot of time tuning alerts of this nature to get them to the point where they are not too noisy, but they still catch real issues. Further, these schemes cannot differentiate between problems for the service and unrelated drops such as a client having problems and failing to make the request in the first place.

    If you're not going to investigate these alerts when they fire or invest in tuning and maintaining them, just avoid this type of alert altogether.

    A better approach is to alert on the number of failures you are seeing from a service. Thresholds can often be determined automatically by looking at the percent of all requests that are failures. For middle tier services, it is also likely that data from the clients can be used to see a percentage of failure from a client perspective instead of, or in addition to, the server side view.

    "},{"location":"asl/alerting-philosophy/#avoid-special-cases","title":"Avoid Special Cases","text":"

    Alerts shouldn't have to be tuned or suppressed during regular maintenance such as replacing instances or doing deployments. As a simple example, consider an alert on the rate of failures. The general assumption would be that a deployment should not be noticed by clients and therefore the alert is still relevant. Alerts that are actionable and look for measured failure tend to work well. If a new instance is coming up, a lack of activity will mean a lack of failures until traffic is being received. At that time if there are failures they should be noticed.

    "},{"location":"asl/alerting-philosophy/#startup-behavior","title":"Startup Behavior","text":"

    What about different behavior during startup? Consider some examples for an application that has a long initialization time (~20 minutes) before it can take traffic:

    For a discovery service like Eureka, the duration of the startup time shouldn't be an issue because the state clearly indicates if it is STARTING vs DOWN.

    If the healthcheck is used for a load balancer, then the decision to send traffic to instances should be fairly sensitive in order to minimize the impact to users. The bigger concern is the number of occurrences of healthcheck failures in a row, which can trigger automated actions like terminating an instance. When evaluating healthcheck failures, there are two distinct conditions to evaluate: non-200 responses and connection timeouts.

    The healthcheck logic should be tied to the Eureka heartbeat so that if the healthcheck is failing due to a non-200 response, the discovery state will be DOWN after initialization is complete. For the first condition, the alert should check for the number of occurrences of the DOWN state in the discovery service which will not trigger for the STARTING state used during application initialization.

    For the second condition, you would need to check for a disparity between the published discovery state and the healthcheck state:

    (DiscoveryStatus is UP) AND (Healthcheck != 200) for N minutes\n

    Note, unless you really need to do this it is probably better to just look at the healthcheck and have the num occurrences set to be longer than the typical startup time.

    For the CPU example, first reconsider whether general system check alerts are actually useful. Is it going to help you catch a real problem and be investigated when it triggers? If not, don't setup an alert on CPU and rely on alerts that check for failures on the critical path.

    If it is useful and you have squeeze testing results or other information so you know when a proxy metric like CPU actually indicates a problem, then you can configure it restricted with some signal that indicates the status. However, keep in mind that not all systems will allow complex expressions. For example, if you are auto-scaling will you be able to send the data such that it doesn't incorrectly skew the alarm? The more signals that are combined the harder it is to understand the alert and the more likely it is to fail in unexpected ways. Before adding more layers of duct tape think hard about the application and if you can change it to be easier to monitor and diagnose.

    "},{"location":"asl/alerting-philosophy/#deployments","title":"Deployments","text":"

    At Netflix, a common deployment model is red/black. In this model, a new auto-scaling group the same size as the existing one will be created, traffic will transition over, and eventually the old auto-scaling group (ASG) will be deleted. This can create false alarms if you haven't thought about the signals being used to fire alerts.

    The most common alerting problem that occurs during deployments is related to the use of averages. For example, the average request rate will drop in half if a new ASG comes up and you are aggregating across a cluster consisting of both old and new ASGs. If you follow the advice given earlier about crafting alerts based on a percentage of errors reported by clients of the application, then aggregating across clusters by sum usually won't be a problem. If the deployment is going well, then the overall failure rate seen by clients shouldn't be impacted.

    Another example of a deployment alerting problem is latency measurements. How can you tell the average latency across a cluster composed of new and old ASGs? Rather than trying to special case or exclude the new group of instances, you should define the alert signal based on the actual activity seen. If there is no activity within an ASG, then it will not impact the signal.

    Metrics libraries like Spectator send both a totalTime and count measurement separately to the backend. This allows the average to be computed using a simple sum aggregate with division:

    Sum(totalTime per instance in cluster) / Sum(count per instance in cluster)\n

    This calculation demonstrates how instances that are not receiving traffic will not contribute anything to the sums.

    "},{"location":"asl/alerting-philosophy/#think-about-failure","title":"Think About Failure","text":"

    An effective alert needs to be able to fire when there is a problem. However, when problems occur, it is possible that the problem will also impact the underlying data or mechanisms used to detect issues for the alert. It is worthwhile to spend time thinking about the ways in which your alerts can fail to detect events.

    "},{"location":"asl/alerting-philosophy/#how-can-signals-fail","title":"How Can Signals Fail?","text":"

    The simplest area to think about is what is collecting and reporting the data. For example, if data is being reported by the plugin running in the application, then it won't work if the application crashes or cannot start. It is recommended to have some basic alerts using a data pipeline that will fail independently from the application. At Netflix, this typically involves checking the following conditions:

    The metric data for those signals comes from a separate poller application. If these succeed, then the application should be healthy enough that alerts triggered from data local to the instance should be working.

    "},{"location":"asl/alerting-philosophy/#alerting-scopes","title":"Alerting Scopes","text":"

    At Netflix, alert expressions for Atlas can be checked in three places:

    In practice, for a given application, the alerting scopes look like:

    Alerting scopes can be used to provide some level of redundancy with different failure modes. For example, the failure rate could be checked against the server stats and the client stats. Further, it is recommended to check alerts as close as possible to where the data is initially measured and collected. In other words, it is better to check the alerts on the plugin or poller rather than against the backend. The advantages of doing this are:

    So why not check all alerts on the client or poller? The primary disadvantages:

    "},{"location":"asl/des/","title":"Double Exponential Smoothing","text":"

    Double exponential smoothing (DES) is a simple technique for generating a smooth trend line from another time series. This technique is often used to generate a dynamic threshold for alerting.

    Warning

    Alerts on dynamic thresholds should be expected to be noisy. They are looking for strange behavior rather than an actual problem causing impact. Make sure you will actually spend the time to tune and investigate the alarms before using this approach. See the alerting philosophy guide for more information on best practices.

    "},{"location":"asl/des/#tuning","title":"Tuning","text":"

    The :des operator takes 4 parameters:

    Note

    For most use cases, the sliding variant of DES, :sdes, should be used instead.

    "},{"location":"asl/des/#training","title":"Training","text":"

    The training parameter defines how many intervals to allow the DES to warmup. In the graph below the gaps from the start of the chart to the smoothed lines reflects the training window used:

    Typically a training window of 10 has been sufficient as DES will adjust to the input fairly quickly. However, in some cases if there is a massive change in the input it can cause DES to oscillate, for example:

    "},{"location":"asl/des/#alpha","title":"Alpha","text":"

    Alpha is the data smoothing factor. A value of 1 means no smoothing. The closer the value gets to 0 the smoother the line should get. Example:

    "},{"location":"asl/des/#beta","title":"Beta","text":"

    Beta is a trend smoothing factor. Visually it is most apparent when alpha is small. Example with alpha = 0.01:

    "},{"location":"asl/des/#recommended-values","title":"Recommended Values","text":"

    Experimentally we have converged on 3 sets of values based on how quickly it should adjust to changing levels in the input signal.

    Helper Alpha Beta :des-fast 0.1 0.02 :des-slower 0.05 0.03 :des-slow 0.03 0.04

    Here is an example of how they behave for a sharp drop and recovery:

    For a more gradual drop:

    If the drop is smooth enough then DES can adjust without ever triggering.

    "},{"location":"asl/des/#alerting","title":"Alerting","text":"

    For alerting purposes the DES line will typically get multiplied by a fraction and then checked to see whether the input line drops below the DES value for a given interval.

    # Query to generate the input line\nnf.cluster,alerttest,:eq,\nname,requestsPerSecond,:eq,:and,\n:sum,\n\n# Create a copy on the stack\n:dup,\n\n# Apply a DES function to generate a prediction\n:des-fast,\n\n# Used to set a threshold. The prediction should\n# be roughly equal to the line, in this case the\n# threshold would be 85% of the prediction.\n0.85,:mul,\n\n# Create a boolean signal line that is 1\n# for datapoints where the actual value is\n# less than the prediction and 0 where it\n# is greater than or equal the prediction.\n# The 1 values are where the alert should\n# trigger.\n:lt,\n\n# Apply presentation details.\n:rot,$name,:legend,\n

    The vertical spans show when the expression would have triggered due to the input dropping below the DES line at 85%:

    "},{"location":"asl/des/#epic-macros","title":"Epic Macros","text":"

    There are two helper macros, des-epic-signal and des-epic-viz, that match the behavior of the previous epic DES alarms. The first generates a signal line for the alarm. The second creates a visualization to make it easier to see what is happening. Both take the following arguments:

    Examples:

    nf.cluster,alerttest,:eq,\nname,requestsPerSecond,:eq,\n:and,\n:sum,\n10,0.1,0.02,0.15,0.15,10,:des-epic-viz\n

    Example with no lower bound:

    nf.cluster,alerttest,:eq,\nname,requestsPerSecond,:eq,\n:and,\n:sum,\n10,0.1,0.02,0.15,NaN,10,:des-epic-viz\n
    "},{"location":"asl/tutorial/","title":"Tutorial","text":"

    Atlas Stack Language is designed to be a stable method of representing complex data queries in a URL-friendly format. It is loosely based on the RPN expressions supported by Tobias Oetiker's rrdtool. The following is an example of a stack language expression:

    nf.cluster,discovery,:eq,(,nf.zone,),:by

    This example pushes two strings nf.cluster and discovery onto the stack and then executes the command :eq. The equal command pops two strings from the stack and pushes a query object onto the stack. The behavior can be described by the stack effect String:key String:value \u2013 Query. We then push a list of tag keys to the stack and execute the command :by to group the results.

    "},{"location":"asl/tutorial/#parts","title":"Parts","text":"

    There are only four reserved symbols used for structuring the expression: ,:()

    1. Commas separate items on the stack. So a,b puts two strings on the stack with values \"a\" and \"b\".
    2. Colon is used to prefix operations. If the first character is a colon the item will be treated as a command to run. For example, a,:dup, will push \"a\" on the stack and then execute the duplicate operation.
    3. Parenthesis are used to indicate the start and end of a list. The expression (,) puts an empty list on the stack. Commands inside of a list will not be executed unless the list is passed to the call command. For example, (,:dup,) will push a list with a single string value of \":dup\" on to the stack.
    "},{"location":"asl/tutorial/#data-model","title":"Data Model","text":"

    The stack language is primarily used for representing expressions over tagged time series data. A tag is a string key value pair used to describe a measurement. Atlas requires at least one tag with a key of name. Example tags represented as a JSON map:

    {\n  \"name\":       \"jvm.gc.pause\",\n  \"cause\":      \"Allocation_Failure\",\n  \"statistic\":  \"count\",\n  \"nf.app\":     \"www\",\n  \"nf.cluster\": \"www-main\",\n  \"nf.asg\":     \"www-main-v001\",\n  \"nf.stack\":   \"main\",\n  \"nf.node\":    \"i-01\",\n  \"nf.region\":  \"us-east-1\",\n  \"nf.zone\":    \"us-east-1a\"\n}\n

    Typically, tags should be dimensions that allow you to use the name as a pivot and other tags to drill down into the data. The tag keys are similar to columns in a traditional table, however, it is important to note that not all time series will have the same set of tag keys.

    The tags are used to identify a time series, which conceptually is a set of timestamp value pairs. Here is a simplified data set shown as a table:

    name app node values cpuUsage www i-01 [(05:00, 33.0), (05:01, 31.0)] cpuUsage www i-02 [(05:00, 20.0), (05:01, 37.0)] cpuUsage db i-03 [(05:00, 57.0), (05:01, 62.0)] diskUsage www i-01 [(05:00, 9.0), (05:01, 9.0)] diskUsage www i-02 [(05:00, 7.0), (05:01, 8.0)] requestRate www [(05:00, 33.0), (05:01, 31.0)]

    The table above will be used for the examples in later sections.

    "},{"location":"asl/tutorial/#simple-expressions","title":"Simple Expressions","text":"

    All expressions generally have four parts:

    1. Choosing: selects a set of time series.
    2. Aggregation: defines how to combine the selected time series.
    3. Math: manipulate the time series values or combine aggregated results with binary operations.
    4. Presentation: adjust how the data is presented in a chart.
    "},{"location":"asl/tutorial/#choosing","title":"Choosing","text":"

    The \"choosing\" or predicate section is used to select a set of time series. The primary predicate operators are :eq and :and.

    Sample query to select all time series where the key node is equal to i-01:

    node,i-01,:eq\n

    If you are familiar with SQL and assume that tag keys are column names, then this would be equivalent to:

    select * from time_series where node = 'i-01';\n

    Using the example data set this query would return the following subset:

    name app node values cpuUsage www i-01 [(05:00, 33.0), (05:01, 31.0)] diskUsage www i-01 [(05:00, 9.0), (05:01, 9.0)]

    To get just the cpu usage for that node, use :and:

    node,i-01,:eq,\nname,cpuUsage,:eq,\n:and\n

    This would result in:

    name app node values cpuUsage www i-01 [(05:00, 33.0), (05:01, 31.0)]"},{"location":"asl/tutorial/#aggregation","title":"Aggregation","text":"

    An aggregation function maps a set of time series that matched the predicate to a single time series. Atlas supports four aggregate functions: sum, min, max, and count. If no aggregate is specified on an expression, then sum will be used implicitly.

    Using the example data set, these two expressions would be equivalent:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and\n
    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:sum\n

    And would result in a single output time series:

    name app values cpuUsage www [(05:00, 53.0), (05:01, 68.0)]

    Note that the node is not present in the output. The set of tags on the output will be ones with exact matches in the predicate clause or explicitly listed in the group by.

    If you wanted the max cpu for the application, then you would write:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:max\n

    What if we want the average? The count aggregate is used to determine how many time series had a value for a given time. To get the average we divide the sum by the count.

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:dup,\n:sum,\n:swap,\n:count,\n:div\n

    There is a helper macro :avg that will do this for you, so you can write:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:avg\n
    "},{"location":"asl/tutorial/#group-by","title":"Group By","text":"

    In many cases we want to group the results that were selected and return one aggregate per group. As an example suppose I want to see maximum cpu usage by application:

    name,cpuUsage,:eq,\n:max,\n(,app,),:by\n

    Using the example data set, this would result in two output time series:

    name app values cpuUsage www [(05:00, 33.0), (05:01, 37.0)] cpuUsage db [(05:00, 57.0), (05:01, 62.0)]"},{"location":"asl/tutorial/#math","title":"Math","text":"

    Once you have a set of lines, it can be useful to manipulate them. The supported operations generally fall into two categories: unary operations to alter a single time series and binary operations that combine two time series.

    Examples of unary operations are negate and absolute value. To apply the absolute value:

    app,web,:eq,\nname,cpu,:eq,\n:and,\n:sum,\n:abs\n

    Multiple operations can be applied, for example, negating the line then applying the absolute value:

    app,web,:eq,\nname,cpu,:eq,\n:and,\n:sum,\n:neg,\n:abs\n

    Common binary operations are add, subtract, multiply, and divide. The aggregation section has an example of using divide to compute the average.

    "},{"location":"asl/tutorial/#presentation","title":"Presentation","text":"

    Once you have a final expression, you can apply presentation settings to alter how a time series is displayed in the chart. One of the most common examples is setting the label to use for the legend:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:avg,\naverage+cpu+usage,:legend\n

    You can also use tag keys as variables in the legend text, for example, setting the legend to the application:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:avg,\n(,app,),:by,\n$(app),:legend\n

    It is also common to adjust how the lines are shown. For example, to stack each of the lines we can use the :stack command to adjust the line style:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:avg,\n(,app,),:by,\n:stack,\n$(app),:legend\n
    "},{"location":"asl/ref/-rot/","title":"-rot","text":"Input Stack:ba... \u21e8 Output Stack:a...b

    Rotate the stack so that the item at the top is now at the bottom.

    Example:

    a,b,c,d,:-rot\n
    PosInputOutput 0 d c 1 c b 2 b a 3 a d"},{"location":"asl/ref/2over/","title":"2over","text":"Input Stack:ba \u21e8 Output Stack:baba

    Shorthand equivalent to writing: :over,:over

    Example:

    a,b,:2over\n
    PosInputOutput 0 b b 1 a a 2 b 3 a"},{"location":"asl/ref/abs/","title":"abs","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute a new time series where each interval has the absolute value of the input time series.

    Examples:

    064-64
    0,:abs\n
    64,:abs\n
    -64,:abs\n
    "},{"location":"asl/ref/add/","title":"add","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 + ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a addNaN b) where a and b are the corresponding intervals in the input time series. Sample:

    :add 3.0 0.0 1.0 1.0 NaN Input 1 1.0 0.0 1.0 1.0 NaN Input 2 2.0 0.0 0.0 NaN NaN

    Use the fadd operator to get strict floating point behavior.

    Examples

    Example adding a constant:

    BeforeAfter
    name,sps,:eq,\n30e3\n
    name,sps,:eq,\n30e3,:add\n

    Example adding two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:add\n
    "},{"location":"asl/ref/all/","title":"all","text":"

    Warning

    Deprecated: use :by instead. This operation is primarily intended for debugging and results can be confusing unless you have detailed understanding of Atlas internals.

    Input Stack:Query \u21e8 Output Stack:DataExpr

    Avoid aggregation and output all time series that match the query.

    "},{"location":"asl/ref/alpha/","title":"alpha","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Set the alpha value for the colors on the line. The value should be a two digit hex number where 00 is transparent and ff is opaque. This setting will be ignored if the color setting is used for the same line.

    BeforeAfter
    name,sps,:eq,\n:sum,\n:stack\n
    name,sps,:eq,\n:sum,\n:stack,\n40,:alpha\n
    BeforeAfter
    name,sps,:eq,\n:sum,\n:stack,\nf00,:color\n
    name,sps,:eq,\n:sum,\n:stack,\nf00,:color,\n40,:alpha\n
    "},{"location":"asl/ref/and/","title":"and","text":"

    There are two variants of the :and operator.

    "},{"location":"asl/ref/and/#choosing","title":"Choosing","text":"Input Stack:q2: Queryq1: Query \u21e8 Output Stack:(q1 AND q2): Query

    This first variant is used for choosing the set of time series to operate on. It is a binary operator that matches if both of the sub-queries match. For example, consider the following query:

    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/and/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 AND ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a AND b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a AND b 00:01 0.0 0.0 0.0 00:01 0.0 1.0 0.0 00:02 1.0 0.0 0.0 00:03 1.0 1.0 1.0 00:04 0.5 1.7 1.0

    The result will be a signal time series that will be 1.0 for all intervals where the corresponding values of a and b are both non-zero. Example:

    BeforeAfter
    minuteOfDay,:time,\n:dup,\n300,:gt,\n:swap,\n310,:lt\n
    minuteOfDay,:time,\n:dup,\n300,:gt,\n:swap,\n310,:lt,\n:and\n
    "},{"location":"asl/ref/area/","title":"area","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Change the line style to be area. In this mode the line will be filled to 0 on the Y-axis.

    See the line style examples page for more information.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:area\n
    "},{"location":"asl/ref/as/","title":"as","text":"Input Stack:replacement: Stringoriginal: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Map a tag key name to an alternate name. This can be useful for cases where it is desirable to perform a binary math operation, but the two sides use different tag keys for the same concept. The common IPC metrics are an example where it might be desirable to compare RPS for servers and their clients. The server side RPS would group by nf.app while the client side view would group by ipc.server.app.

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nnf.cluster,c,:as,\n$c,:legend\n
    "},{"location":"asl/ref/avg/","title":"avg","text":"

    Average or mean aggregation operator. There are two variants of the :avg operator.

    "},{"location":"asl/ref/avg/#aggregation","title":"Aggregation","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    A helper method that computes the average or mean from one or more time series using the count aggregate to determine how many time series have data at an interval and dividing the sum of the values by the count. This avoids issues where one or more time series are missing data at a specific time resulting in an artificially low average. E.g. the expression:

    name,ssCpuUser,:eq,\n:avg\n

    when matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The values from the corresponding intervals will be aggregated. For the first interval using the sample data above the values are 1.0, 8.0, and 1.0. Each value other than NaN contributes one to the average. This leads to a final result of:

    NameData ssCpuUser [3.33, 3.66, 4.0]

    The only tags for the aggregated result are those that are matched exactly (:eq clause) as part of the choosing criteria or are included in a group by.

    "},{"location":"asl/ref/avg/#math","title":"Math","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute the average of all the time series from the input expression. This is typically used when there is a need to use some other aggregation for the grouping. Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:max,\n(,nf.cluster,),:by,\n:avg\n
    "},{"location":"asl/ref/axis/","title":"axis","text":"Input Stack:IntTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Specify which Y-axis to use for the line. The value specified is the axis number and should be an integer in the range 0 to 4 inclusive.

    Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n42\n
    name,sps,:eq,\n:sum,\n42,1,:axis\n
    "},{"location":"asl/ref/bottomk-others-avg/","title":"bottomk-others-avg","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the smallest value for the specified summary statistic and computes an average aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:bottomk-others-avg\n
    "},{"location":"asl/ref/bottomk-others-max/","title":"bottomk-others-max","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the smallest value for the specified summary statistic and computes a max aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:bottomk-others-max\n
    "},{"location":"asl/ref/bottomk-others-min/","title":"bottomk-others-min","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the smallest value for the specified summary statistic and computes a min aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:bottomk-others-min\n
    "},{"location":"asl/ref/bottomk-others-sum/","title":"bottomk-others-sum","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the smallest value for the specified summary statistic and computes a sum aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:bottomk-others-sum\n
    "},{"location":"asl/ref/bottomk/","title":"bottomk","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the smallest value for the specified summary statistic. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:bottomk\n

    In some cases it can be useful to see an aggregate summary of the other time series that were not part of the bottom set. This can be accomplished using the :bottomk-others-$(aggr) operators. For more details see:

    "},{"location":"asl/ref/by/","title":"by","text":"

    Group by operator. There are two variants of the :by operator.

    "},{"location":"asl/ref/by/#aggregation","title":"Aggregation","text":"Input Stack:keys: List[String]AggregationFunction \u21e8 Output Stack:DataExpr

    Groups the matching time series by a set of keys and applies an aggregation to matches of the group.

    name,ssCpu,:re,\n(,name,),:by\n

    When matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The aggregation function will be applied independently for each group. In this example above there are two matching values for the group by key name. This leads to a final result of:

    NameData ssCpuSystem [9.0, 11.0, 13.0] ssCpuUser [10.0, 11.0, 8.0]

    The name tag is included in the result set since it is used for the grouping.

    "},{"location":"asl/ref/by/#math","title":"Math","text":"Input Stack:keys: List[String]TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Groups the time series from the input expression by a set of keys and applies an aggregation to matches of the group. The keys used for this grouping must be a subset of keys from the initial group by clause. Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n(,nf.cluster,nf.node,),:by\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,nf.node,),:by,\n:count,\n(,nf.cluster,),:by\n
    "},{"location":"asl/ref/call/","title":"call","text":"Input Stack:?List \u21e8 Output Stack:?

    Pops a list off the stack and executes it as a program.

    Example:

    (,a,),:call\n
    Pos Input Output 0 List(a) a"},{"location":"asl/ref/cf-avg/","title":"cf-avg","text":"Input Stack:AggregationFunction \u21e8 Output Stack:AggregationFunction

    Force the consolidation function to be average.

    "},{"location":"asl/ref/cf-max/","title":"cf-max","text":"Input Stack:AggregationFunction \u21e8 Output Stack:AggregationFunction

    Force the consolidation function to be max.

    "},{"location":"asl/ref/cf-min/","title":"cf-min","text":"Input Stack:AggregationFunction \u21e8 Output Stack:AggregationFunction

    Force the consolidation function to be min.

    "},{"location":"asl/ref/cf-sum/","title":"cf-sum","text":"Input Stack:AggregationFunction \u21e8 Output Stack:AggregationFunction

    Force the consolidation function to be sum.

    "},{"location":"asl/ref/cg/","title":"cg","text":"Input Stack:keys: List[String]Expr \u21e8 Output Stack:Expr

    Recursively add a list of keys to group by expressions. This can be useful for tooling that needs to adjust existing expressions to include keys in the grouping.

    BeforeAfter
    name,sps,:eq,\n(,nf.app,),:by\n
    name,sps,:eq,\n(,nf.app,),:by,\n(,nf.cluster,),:cg\n
    "},{"location":"asl/ref/clamp-max/","title":"clamp-max","text":"Input Stack:DoubleTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Restricts the maximum value of the output time series to the specified value. Values from the input time series that are less than or equal to the maximum will not be changed.

    A common use-case is to allow for auto-scaled axis up to a specified bound. The axis parameters for controlling the axis bounds have the following limitations:

    Consider the following graph:

    The spike makes it difficult to make out any detail for other times. One option to handle this is to use an alternate axis scale such as logarithmic that gives a higher visual weight to the smaller values. However, it is often easier for a user to reason about a linear scale, in particular, for times when there is no spike in the graph window. If there is a known max reasonable value, then the :clamp-max operator can be used to restrict the line if and only if it exceeds the designated max. For example, if we limit the graph above to 25:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n60e3,:clamp-max\n
    "},{"location":"asl/ref/clamp-min/","title":"clamp-min","text":"Input Stack:DoubleTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Restricts the minimum value of the output time series to the specified value. Values from the input time series that are greater than or equal to the minimum will not be changed. A common use-case is to allow for auto-scaled axis up to a specified bound. For more details see :clamp-max.

    Example:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n200e3,:clamp-min\n
    "},{"location":"asl/ref/clear/","title":"clear","text":"Input Stack:... \u21e8 Output Stack:

    Remove all items from the stack.

    Example:

    a,b,c,:clear\n
    PosInputOutput 0 c 1 b 2 a"},{"location":"asl/ref/color/","title":"color","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Set the color for the line. The value should be one of:

    For queries with multiple time series, color palettes are available to automatically assign different colors to the various series. See Color Palettes.

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\nff0000,:color\n
    "},{"location":"asl/ref/const/","title":"const","text":"Input Stack:Double \u21e8 Output Stack:TimeSeriesExpr

    Generates a line where each datapoint is a constant value. Any double value that is left on the stack will get implicitly converted to a constant line, so this operator is typically not used explicitly.

    BeforeAfter
    42\n
    42,:const\n
    "},{"location":"asl/ref/contains/","title":"contains","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:Query

    Select time series where the value for a key includes the specified substring. For example, consider the following query:

    name,Cpu,:contains\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/count/","title":"count","text":"

    Count aggregation operator. There are two variants of the :count operator.

    "},{"location":"asl/ref/count/#aggregation","title":"Aggregation","text":"Input Stack:Query \u21e8 Output Stack:AggregationFunction

    Compute the number of time series that match the query and have a value for a given interval.

    name,ssCpuUser,:eq,\n:count\n

    When matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The values from the corresponding intervals will be aggregated. For the first interval using the sample data above the values are 1.0, 8.0, and 1.0. Each value other than NaN contributes one to the count. This leads to a final result of:

    NameData ssCpuUser [3.0, 3.0, 2.0]

    The only tags for the aggregated result are those that are matched exactly (:eq clause) as part of the choosing criteria or are included in a group by.

    "},{"location":"asl/ref/count/#math","title":"Math","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute the number of time series from the input expression that have a value for a given interval. Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:count\n
    "},{"location":"asl/ref/cq/","title":"cq","text":"Input Stack:QueryExpr \u21e8 Output Stack:Expr

    Recursively AND a common query to all queries in an expression. If the first parameter is not an expression, then it will not be modified.

    Example:

    name,ssCpuUser,:eq,\nname,DiscoveryStatus_UP,:eq,\n:mul,\nnf.app,alerttest,:eq,\n:cq\n
    BeforeAfter
    name,ssCpuUser,:eq,\nname,DiscoveryStatus_UP,:eq,\n:mul,\nnf.app,alerttest,:eq\n
    name,ssCpuUser,:eq,\nname,DiscoveryStatus_UP,:eq,\n:mul,\nnf.app,alerttest,:eq,\n:cq\n
    BeforeAfter
    42,nf.app,alerttest,:eq\n
    42,nf.app,alerttest,:eq,\n:cq\n
    "},{"location":"asl/ref/decode/","title":"decode","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Note

    It is recommended to avoid using special symbols or trying to encode structural information into tag values. This feature should be used sparingly and with great care to ensure it will not result in a combinatorial explosion.

    Perform decoding of the legend strings. Generally data going into Atlas is restricted to simple ascii characters that are easy to use as part of a URI. Most commonly the clients will convert unsupported characters to an _. In some case it is desirable to be able to reverse that for the purposes of presentation.

    Since: 1.5

    Example:

    Hex to ASCII
    1,one_21_25_26_3F,:legend,\nhex,:decode\n
    "},{"location":"asl/ref/delay/","title":"delay","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Delays the values by the window size. This is similar to the :offset operator except that it can be applied to any input line instead of just changing the time window fetched with a DataExpr. Short delays can be useful for alerting to detect changes in slightly shifted trend lines.

    Since: 1.6

    BeforeAfterCombined
    name,requestsPerSecond,:eq,\n:sum\n
    name,requestsPerSecond,:eq,\n:sum,\n5,:delay\n
    name,requestsPerSecond,:eq,\n:sum,\n:dup,\n5,:delay\n
    "},{"location":"asl/ref/depth/","title":"depth","text":"Input Stack:... \u21e8 Output Stack:Int...

    Push the depth of the stack.

    Since: 1.5.0

    Examples:

    ,:depth\n
    PosInputOutput 0 0
    a,:depth\n
    PosInputOutput 0 a 1 1 a
    a,b,:depth\n
    PosInputOutput 0 b 2 1 a b 2 a"},{"location":"asl/ref/derivative/","title":"derivative","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Opposite of :integral. Computes the rate of change per step of the input time series.

    DerivativeIntegralIntegral Then Derivative
    1,:derivative\n
    1,:integral\n
    1,:integral,\n:derivative\n
    "},{"location":"asl/ref/des-epic-signal/","title":"des-epic-signal","text":"Input Stack:noise: DoubleminPercent: DoublemaxPercent: Doublebeta: Doublealpha: Doubletraining: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for configuring DES in a manner compatible with legacy epic alerts. For more information see the epic macros section of the DES page.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n10,0.1,0.5,0.2,0.2,4,:des-epic-signal\n
    "},{"location":"asl/ref/des-epic-viz/","title":"des-epic-viz","text":"Input Stack:noise: DoubleminPercent: DoublemaxPercent: Doublebeta: Doublealpha: Doubletraining: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for configuring DES in a manner compatible with legacy Epic alerts. For more information see the epic macros section of the DES page.

    Example
    name,sps,:eq,\n:sum,\n10,0.1,0.5,0.2,0.2,4,:des-epic-viz\n
    "},{"location":"asl/ref/des-fast/","title":"des-fast","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing DES using settings to quickly adjust to the input line. See recommended values for more information. For most use-cases the sliding DES variant :sdes-fast should be used instead.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:des-fast\n
    "},{"location":"asl/ref/des-simple/","title":"des-simple","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing DES using default values.

    Warning

    The values used by this operation are prone to wild oscillations. See recommended values for better options.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:des-simple\n
    "},{"location":"asl/ref/des-slow/","title":"des-slow","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing DES using settings to slowly adjust to the input line. See recommended values for more information. For most use-cases the sliding DES variant :sdes-slow should be used instead.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:des-slow\n
    "},{"location":"asl/ref/des-slower/","title":"des-slower","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing DES using settings to slowly adjust to the input line. See recommended values for more information. For most use-cases the sliding DES variant :sdes-slower should be used instead.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:des-slower\n
    "},{"location":"asl/ref/des/","title":"des","text":"Input Stack:beta: Doublealpha: Doubletraining: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Double exponential smoothing. For most use-cases sliding DES should be used instead to ensure a deterministic prediction.

    BeforeAfter
    name,requestsPerSecond,:eq,\n:sum\n
    name,requestsPerSecond,:eq,\n:sum,\n5,0.1,0.5,:des\n
    "},{"location":"asl/ref/dist-avg/","title":"dist-avg","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    Compute the average recorded value for timers and distribution summaries. This is calculated by dividing the total amount recorded by the number of recorded values.

    For [Timer] and Distribution Summary metrics, the totalTime (timers) /totalAmount (distributions) and count are collected each time a measurement is taken. If this technique was applied to a request latency metric, then you would have the average latency per request for an arbitrary grouping. These types of metrics have an explicit count based on activity. To get an average per measurement manually:

    statistic,totalTime,:eq,\n:sum,\nstatistic,count,:eq,\n:sum,\n:div\n

    This expression can be bound to a query using the :cq (common query) operator:

    statistic,totalTime,:eq,\n:sum,\nstatistic,count,:eq,\n:sum,\n:div,\nnf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:cq\n

    Using the :dist-avg function reduces the query to:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-avg\n

    To compute the average by group, apply the group after the :dist-avg function:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-avg,\n(,nf.asg,),:by\n
    BeforeAfter
    name,playback.startLatency,:eq\n
    name,playback.startLatency,:eq,\n:dist-avg\n
    "},{"location":"asl/ref/dist-max/","title":"dist-max","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    Compute the maximum recorded value for timers and distribution summaries. This is a helper for aggregating by the max of the max statistic for the meter.

    A manual query would look like:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\nstatistic,max,:eq,\n:and,\n:max\n

    Using :dist-max the query is reduced to:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-max\n
    BeforeAfter
    name,playback.startLatency,:eq\n
    name,playback.startLatency,:eq,\n:dist-max\n
    "},{"location":"asl/ref/dist-stddev/","title":"dist-stddev","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    Compute the standard deviation for timers and distribution summaries.

    A manual query would look like:

    statistic,count,:eq,\n:sum,\nstatistic,totalOfSquares,:eq,\n:sum,\n:mul,\nstatistic,totalTime,:eq,\n:sum,\n:dup,\n:mul,\n:sub,\nstatistic,count,:eq,\n:sum,\n:dup,\n:mul,\n:div,\n:sqrt,\nnf.cluster,foo,:eq,\n name,http.req.latency,:eq,\n:and,\n:cq\n

    This is much simpler using the :dist-stddev function:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-stddev\n
    BeforeAfter
    name,playback.startLatency,:eq\n
    name,playback.startLatency,:eq,\n:dist-stddev\n
    "},{"location":"asl/ref/div/","title":"div","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 / ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a / b) where a and b are the corresponding intervals in the input time series. If a and b are 0, then 0 will be returned for the interval. If only b is 0, then NaN will be returned as the value for the interval. Sample data:

    :div 0.5 0.0 NaN NaN NaN Input 1 1.0 0.0 1.0 1.0 NaN Input 2 2.0 0.0 0.0 NaN NaN

    Use the fdiv operator to get strict floating point behavior.

    Example dividing a constant:

    BeforeAfter
    name,sps,:eq,\n42\n
    name,sps,:eq,\n42,:div\n

    Example dividing two series:

    BeforeAfter
    name,sps,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,sps,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:div\n
    "},{"location":"asl/ref/drop/","title":"drop","text":"Input Stack:a \u21e8 Output Stack:

    Remove the item on the top of the stack.

    Example:

    a,b,c,:drop\n
    PosInputOutput 0 c b 1 b a 2 a
    :drop\n

    Warning

    Throws an exception due to an empty stack.

    "},{"location":"asl/ref/dup/","title":"dup","text":"Input Stack:a: ? \u21e8 Output Stack:a: ?a: ?

    Duplicates the item on the top of the stack.

    Example:

    BeforeAfter
    minuteOfDay,:time\n
    minuteOfDay,:time,\n:dup\n
    "},{"location":"asl/ref/each/","title":"each","text":"Input Stack:function: Listitems: List \u21e8 Output Stack:function(items[N-1])...function(items[0])

    Pops a function and a list off the stack, then applies the function to each item in the list.

    Example:

    (,a,b,),(,:dup,\n),:each\n
    PosInputOutput 0 List(:dup) a 1 List(a, b) a 2 b 3 b"},{"location":"asl/ref/ends/","title":"ends","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:Query

    Select time series where the value for a key has the specified suffix. For example, consider the following query:

    name,ssCpuUser,:ends\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/eq/","title":"eq","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k == v): Query

    Select time series that have a specified value for a key. For example, consider the following query:

    name,ssCpuUser,:eq\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/eureka-avg/","title":"eureka-avg","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    A helper to compute an average using the number of instances in the UP state based on the discovery.status metric as the denominator. The common infrastructure tags will be used to restrict the scope for the denominator. This operator should be used if the numerator is based on incoming traffic that is routed via the Eureka service and goal is to compute an average per node receiving traffic.

    name,sps,:eq,\nnf.app,nccp,:eq,\n:and,\n:eureka-avg\n
    "},{"location":"asl/ref/fadd/","title":"fadd","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 + ts2): TimeSeriesExpr

    Floating point addition operator. Compute a new time series where each interval has the value (a + b) where a and b are the corresponding intervals in the input time series.

    :fadd 3.0 0.0 1.0 NaN NaN Input 1 2.0 0.0 1.0 1.0 NaN Input 2 1.0 0.0 0.0 NaN NaN

    Note in many cases NaN will appear in data, e.g., if a node was brought up and started reporting in the middle of the time window for the graph. This can lead to confusing behavior if added to a line that does have data as the result will be NaN. Use the add operator to treat NaN values as zero for combining with other time series.

    Example adding a constant:

    BeforeAfter
    name,sps,:eq,\n30e3\n
    name,sps,:eq,\n30e3,:fadd\n

    Example adding two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:fadd\n
    "},{"location":"asl/ref/false/","title":"false","text":"Input Stack: \u21e8 Output Stack:Query

    Query expression that will not match any input time series. See also :true.

    "},{"location":"asl/ref/fcall/","title":"fcall","text":"Input Stack:String... \u21e8 Output Stack:?

    Shorthand equivalent to writing: :get,:call

    Example:

    duplicate,(,:dup,\n),:set,\na,duplicate,:fcall\n
    PosInputOutput 0 duplicate a 1 a a"},{"location":"asl/ref/fdiv/","title":"fdiv","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 / ts2): TimeSeriesExpr

    Floating point division operator. Compute a new time series where each interval has the value (a / b) where a and b are the corresponding intervals in the input time series.

    :fdiv 2.0 NaN Inf NaN NaN Input 1 2.0 0.0 1.0 1.0 NaN Input 2 1.0 0.0 0.0 NaN NaN

    Note in many cases NaN will appear in data, e.g., if a node was brought up and started reporting in the middle of the time window for the graph. Zero divided by zero can also occur due to lack of activity in some windows. Unless you really need strict floating point behavior, use the div operator to get behavior more appropriate for graphs.

    Example dividing a constant:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n1024,:fdiv\n

    Example dividing two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:fdiv\n
    "},{"location":"asl/ref/filter/","title":"filter","text":"Input Stack:TimeSeriesExprTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Filters the results of a grouped expression by another expression. The filter expression is a set of signal time series indicating if the corresponding time series from the original expression should be shown. Simple example that suppresses all lines:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n0,:filter\n

    Filtering is most commonly performed using the summary statistics for the original expression. For example, to show only the lines that have an average value across the query window greater than 5k and less than 20k:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:dup,\navg,:stat,\n5e3,:gt,\n:over,\navg,:stat,\n20e3,:lt,\n:and,\n:filter\n

    There are helpers, :stat-$(name), to express this common pattern more easily for filters. They act as placeholders for the specified statistic on the input time series. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria. See the :stat operator for more details on available statistics. For this example, :stat-avg would be used:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-avg,\n5e3,:gt,\n:stat-avg,\n20e3,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/fmul/","title":"fmul","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 * ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a * b) where a and b are the corresponding intervals in the input time series.

    Example multiplying a constant:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n1024,:fmul\n

    Example multiplying two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:fmul\n
    "},{"location":"asl/ref/format/","title":"format","text":"Input Stack:args: Listpattern: String \u21e8 Output Stack:str: String

    Format a string using a printf style pattern.

    Example:

    foo%s,(,bar,),:format\n
    PosInputOutput 0 List(bar) foobar 1 foo%s"},{"location":"asl/ref/freeze/","title":"freeze","text":"Input Stack:... \u21e8 Output Stack:

    Freeze removes all data from the stack and pushes it to a separate frozen stack that cannot be modified other than to push additional items using the freeze operation. The final stack at the end of the execution will include the frozen contents along with anything that is on the normal stack.

    This operation is useful for isolating common parts of the stack while still allowing tooling to manipulate the main stack using concatenative rewrite operations. The most common example of this is the :cq operation used to apply a common query to graph expressions. For a concrete example, suppose you want to have an overlay expression showing network errors on a switch that you want to add in to graphs on a dashboard. The dashboard allows drilling into the graphs by selecting a particular cluster. To make this work the dashboard appends a query rewrite to the expression like:

    ,:list,(,nf.cluster,{{ selected_cluster }},:eq,:cq,),:each\n

    This :list operator will apply to everything on the stack. However, this is problematic because the cluster restriction will break the overlay query. Using the freeze operator the overlay expression can be isolated from the main stack. So the final expression would look something like:

    # Query that should be used as is and not modified further\nname,networkErrors,:eq,:sum,50,:gt,:vspan,40,:alpha,\n:freeze,\n\n# Normal contents of the stack\nname,ssCpuUser,:eq,:avg,1,:axis,\nname,loadavg1,:eq,:avg,2,:axis,\n\n# Rewrite appended by tooling, only applies to main stack\n:list,(,nf.cluster,{{ selected_cluster }},:eq,:cq,),:each\n

    Since: 1.6

    Example:

    a,b,c,:freeze\n
    PosInputOutput 0 c c 1 b b 2 a a"},{"location":"asl/ref/fsub/","title":"fsub","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 - ts2): TimeSeriesExpr

    Floating point subtraction operator. Compute a new time series where each interval has the value (a - b) where a and b are the corresponding intervals in the input time series.

    :fsub 1.0 0.0 1.0 NaN NaN Input 1 2.0 0.0 1.0 1.0 NaN Input 2 1.0 0.0 0.0 NaN NaN

    Note in many cases NaN will appear in data, e.g., if a node was brought up and started reporting in the middle of the time window for the graph. This can lead to confusing behavior if added to a line that does have data as the result will be NaN. Use the sub operator to treat NaN values as zero for combining with other time series.

    Example subtracting a constant:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n30000,:fsub\n

    Example subtracting two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:fsub\n
    "},{"location":"asl/ref/ge/","title":"ge","text":"

    Greater than or equal operator. There are two variants of the :ge operator.

    "},{"location":"asl/ref/ge/#choosing","title":"Choosing","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k >= v): Query

    This first variant is used for choosing the set of time series to operate on. It selects time series that have a value for a key that is greater than or equal to a specified value. For example, consider the following query:

    name,ssCpuSystem,:ge\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/ge/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 >= ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a >= b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a >= b 00:01 0.0 0.0 0.0 00:01 0.0 1.0 0.0 00:02 1.0 0.0 1.0 00:03 1.0 1.0 1.0 00:04 0.5 1.7 0.0

    The result will be a signal time series that will be 1.0 for intervals where the condition is true and 0.0 for intervals where it is false.

    Info

    Note, the data points have floating point values. It is advisable to avoid relying on an exact equality match.

    Example:

    BeforeAfter
    minuteOfHour,:time,\nhourOfDay,:time\n
    minuteOfHour,:time,\nhourOfDay,:time,\n:ge\n
    "},{"location":"asl/ref/get/","title":"get","text":"Input Stack:k \u21e8 Output Stack:vars[k]

    Get the value of a variable and push it on the stack.

    Example:

    k,v,:set,\nk,:get\n
    PosInputOutput 0 k v"},{"location":"asl/ref/gt/","title":"gt","text":"

    Greater than operator. There are two variants of the :gt operator.

    "},{"location":"asl/ref/gt/#choosing","title":"Choosing","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k > v): Query

    This first variant is used for choosing the set of time series to operate on. It selects time series that have a value for a key that is greater than a specified value. For example, consider the following query:

    name,ssCpuSystem,:gt\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/gt/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 > ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a > b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a > b 00:01 0.0 0.0 0.0 00:01 0.0 1.0 0.0 00:02 1.0 0.0 1.0 00:03 1.0 1.0 0.0 00:04 0.5 1.7 0.0

    The result will be a signal time series that will be 1.0 for intervals where the condition is true and 0.0 for intervals where it is false.

    Example:

    BeforeAfter
    minuteOfHour,:time,\nhourOfDay,:time\n
    minuteOfHour,:time,\nhourOfDay,:time,\n:gt\n
    "},{"location":"asl/ref/has/","title":"has","text":"Input Stack:k: String \u21e8 Output Stack:Query

    Select time series that have a specified key. For example, consider the following query:

    nf.node,:has\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp ssCpuUser api i-0456"},{"location":"asl/ref/head/","title":"head","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Shorthand equivalent to writing: :limit

    Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n2,:head\n
    "},{"location":"asl/ref/heatmap/","title":"heatmap","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Since 1.8.

    Plot the time series as a heatmap.

    See heatmap for more information.

    Example:

    Default
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"asl/ref/in/","title":"in","text":"Input Stack:vs: List[String]k: String \u21e8 Output Stack:(k in vs): Query

    Select time series where the value for a key is in the specified set. For example, consider the following query:

    name,(,ssCpuUser,ssCpuSystem,),:in\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/integral/","title":"integral","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Sum the values across the evaluation context. This is typically used to approximate the distinct number of events that occurred. If the input is non-negative, then each datapoint for the output line will represent the area under the input line from the start of the graph to the time for that datapoint. Missing values, NaN, will be treated as zeroes. For example:

    Input :integral 0 0 1 1 -1 0 NaN 0 0 0 1 1 2 3 1 4 1 5 0 5

    For a counter, each data point represents the average rate per second over the step interval. To compute the total amount incremented, the value first needs to be converted to a rate per step interval. This conversion can be performed using the :per-step operation.

    Examples:

    BeforeAfter
    1\n
    1,:integral\n
    BeforeAfter
    name,requestsPerSecond,:eq,\n:sum,\n:per-step\n
    name,requestsPerSecond,:eq,\n:sum,\n:per-step,\n:integral\n
    "},{"location":"asl/ref/le/","title":"le","text":"

    Less than or equal operator. There are two variants of the :le operator.

    "},{"location":"asl/ref/le/#choosing","title":"Choosing","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k <= v): Query

    This first variant is used for choosing the set of time series to operate on. It selects time series that have a value for a key that is less than or equal to a specified value. For example, consider the following query:

    name,ssCpuSystem,:le\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/le/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 <= ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a <= b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a <= b 00:01 0.0 0.0 1.0 00:01 0.0 1.0 1.0 00:02 1.0 0.0 0.0 00:03 1.0 1.0 1.0 00:04 0.5 1.7 1.0

    The result will be a signal time series that will be 1.0 for intervals where the condition is true and 0.0 for intervals where it is false.

    Example:

    BeforeAfter
    minuteOfHour,:time,\nhourOfDay,:time\n
    minuteOfHour,:time,\nhourOfDay,:time,\n:le\n
    "},{"location":"asl/ref/legend/","title":"legend","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Set the legend text. Legends can contain variables based on the exact keys matched in the query clause and keys used in a group by. Variables start with a $ sign and can optionally be enclosed between parentheses. The parentheses are required for cases where the characters immediately following the name could be a part of the name. If a variable is not defined, then the name of the variable will be used as the substitution value.

    The variable atlas.offset can be used to indicate the time shift used for the underlying data.

    Examples:

    BeforeAfter
    name,sps,:eq,\n(,name,),:by\n
    name,sps,:eq,\n(,name,),:by,\n$name,:legend\n
    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\ncluster+$nf.cluster,:legend\n
    "},{"location":"asl/ref/limit/","title":"limit","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Restrict the output to the first N lines from the input expression. The lines will be chosen in order based on the sort and order used.

    Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n3,:limit\n
    "},{"location":"asl/ref/line/","title":"line","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Change the line style to be line. This is the default mode and usually does not need to be set explicitly.

    See the line style examples page for more information.

    Example:

    Default
    name,sps,:eq,\n:sum,\n:line\n
    "},{"location":"asl/ref/list/","title":"list","text":"Input Stack:... \u21e8 Output Stack:List[?]

    Pop all items off the stack and push them as a list.

    Example:

    a,b,:list\n
    PosInputOutput 0 b List(b, a) 1 a
    ,:list\n
    PosInputOutput 0 List()"},{"location":"asl/ref/ls/","title":"ls","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Set the line style. The value should be one of:

    See the line style examples page for more information.

    Example:

    LineArea
    name,sps,:eq,\n:sum,\n(,name,),:by,\nline,:ls\n
    name,sps,:eq,\n:sum,\n(,name,),:by,\narea,:ls\n
    StackVSpan
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\nstack,:ls\n
    name,sps,:eq,\n:sum,\n(,name,),:by,\n200e3,:gt,\nvspan,:ls\n
    "},{"location":"asl/ref/lt/","title":"lt","text":"

    Less than operator. There are two variants of the :lt operator.

    "},{"location":"asl/ref/lt/#choosing","title":"Choosing","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k < v): Query

    This first variant is used for choosing the set of time series to operate on. It selects time series that have a value for a key that is less than a specified value. For example, consider the following query:

    name,ssCpuSystem,:lt\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/lt/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 < ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a < b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a < b 00:01 0.0 0.0 0.0 00:01 0.0 1.0 1.0 00:02 1.0 0.0 0.0 00:03 1.0 1.0 0.0 00:04 0.5 1.7 1.0

    The result will be a signal time series that will be 1.0 for intervals where the condition is true and 0.0 for intervals where it is false.

    Example:

    BeforeAfter
    minuteOfHour,:time,\nhourOfDay,:time\n
    minuteOfHour,:time,\nhourOfDay,:time,\n:lt\n
    "},{"location":"asl/ref/lw/","title":"lw","text":"Input Stack:IntTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    The width of the stroke used when drawing the line.

    Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n(,name,),:by\n
    name,sps,:eq,\n:sum,\n(,name,),:by,\n2,:lw\n
    "},{"location":"asl/ref/map/","title":"map","text":"Input Stack:function: Listitems: List \u21e8 Output Stack:List(function(items[0], ..., items[N-1])

    Create a new list by applying a function to all elements of a list.

    Example:

    (,a%s,b%s,),(,(,.netflix.com,),:format,\n),:map\n
    PosInputOutput 0 List((, .netflix.com, ), :format) List(a.netflix.com, b.netflix.com) 1 List(a%s, b%s)"},{"location":"asl/ref/max/","title":"max","text":"

    Max aggregation operator. There are two variants of the :max operator.

    "},{"location":"asl/ref/max/#aggregation","title":"Aggregation","text":"Input Stack:Query \u21e8 Output Stack:AggregationFunction

    Select the maximum value for corresponding times across all matching time series.

    name,ssCpuUser,:eq,\n:max\n

    When matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The values from the corresponding intervals will be aggregated. For the first interval using the sample data above the values are 1.0, 8.0, and 1.0. The maximum of those values, ignoring NaN, is 8.0. This leads to a final result of:

    NameData ssCpuUser [8.0, 7.0, 6.0]

    The only tags for the aggregated result are those that are matched exactly (:eq clause) as part of the choosing criteria or are included in a group by.

    "},{"location":"asl/ref/max/#math","title":"Math","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Select the maximum value for corresponding times across the time series resulting from the input expression. This is typically used when there is a need to use some other aggregation for the grouping. Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\n:max\n
    "},{"location":"asl/ref/median/","title":"median","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    Shorthand equivalent to writing: (,50,),:percentiles

    BeforeAfter
    name,requestLatency,:eq\n
    name,requestLatency,:eq,\n:median\n
    "},{"location":"asl/ref/min/","title":"min","text":"

    Min aggregation operator. There are two variants of the :min operator.

    "},{"location":"asl/ref/min/#aggregation","title":"Aggregation","text":"Input Stack:Query \u21e8 Output Stack:AggregationFunction

    Select the minimum value for corresponding times across all matching time series.

    name,ssCpuUser,:eq,\n:min\n

    When matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The values from the corresponding intervals will be aggregated. For the first interval using the sample data above the values are 1.0, 8.0, and 1.0. The minimum of those values, ignoring NaN, is 1.0. This leads to a final result of:

    NameData ssCpuUser [1.0, 2.0, 2.0]

    The only tags for the aggregated result are those that are matched exactly (:eq clause) as part of the choosing criteria or are included in a group by.

    "},{"location":"asl/ref/min/#math","title":"Math","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Select the minimum value for corresponding times across the time series resulting from the input expression. This is typically used when there is a need to use some other aggregation for the grouping. Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\n:min\n
    "},{"location":"asl/ref/mul/","title":"mul","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 * ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a * b) where a and b are the corresponding intervals in the input time series. NaNs in a series when other series are present are treated as 1.

    Example multiplying a constant:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n1024,:mul\n

    Example multiplying two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:mul\n
    "},{"location":"asl/ref/named-rewrite/","title":"named-rewrite","text":"Input Stack:name: Stringrewritten: TimeSeriesExproriginal: TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Internal operation used by some macros to provide a more user friendly display expression. The expanded version will get used for evaluation, but if a new expression is generated from the parsed expression tree it will use the original version along with the name of the macro.

    BeforeAfter
    name,ssCpuUser,:eq,\n:dup,\n:dup,\n:sum,\n:swap,\n:count,\n:div\n
    name,ssCpuUser,:eq,\n:dup,\n:dup,\n:sum,\n:swap,\n:count,\n:div,\navg,:named-rewrite\n
    "},{"location":"asl/ref/ndrop/","title":"ndrop","text":"Input Stack:Na0...aN \u21e8 Output Stack:aN

    Remove the top N items on the stack.

    Example:

    a,0,:ndrop\n
    PosInputOutput 0 0 a 1 a
    a,b,c,2,:ndrop\n
    PosInputOutput 0 2 a 1 c 2 b 3 a
    a,b,c,4,:ndrop\n
    PosInputOutput 0 4 1 c 2 b 3 a
    ,:ndrop\n

    Warning

    Throws an exception due to missing the N param.

    "},{"location":"asl/ref/neg/","title":"neg","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute a new time series where each interval has the negated value of the input time series.

    Example:

    064-64
    0,:neg\n
    64,:neg\n
    -64,:neg\n
    "},{"location":"asl/ref/nip/","title":"nip","text":"Input Stack:? \u21e8 Output Stack:?

    Shorthand equivalent to writing: :swap,:drop

    Example:

    a,b,:nip\n
    PosInputOutput 0 b b 1 a"},{"location":"asl/ref/nlist/","title":"nlist","text":"Input Stack:Na0...aN \u21e8 Output Stack:List(aN-1, ..., a0)aN

    Create a list with the top N items on the stack.

    Since: 1.5.0

    Examples:

    a,0,:nlist\n
    PosInputOutput 0 0 List() 1 a a
    a,b,c,2,:nlist\n
    PosInputOutput 0 2 List(b, c) 1 c a 2 b 3 a
    a,b,c,4,:nlist\n
    PosInputOutput 0 4 List(a, b, c) 1 c 2 b 3 a"},{"location":"asl/ref/node-avg/","title":"node-avg","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    A helper to compute an average using the poller.asg.instance metric as the denominator. The common infrastructure tags will be used to restrict the scope for the denominator. This operator should be used instead of :avg if the goal is to compute an average per node.

    name,sps,:eq,\nnf.app,nccp,:eq,\n:and,\n:node-avg\n
    "},{"location":"asl/ref/not/","title":"not","text":"Input Stack:q: Query \u21e8 Output Stack:(!q): Query

    Invert the query so that time series not matching the sub-query are selected. For example, consider the following query:

    nf.node,:has,\n:not\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp ssCpuUser api i-0456"},{"location":"asl/ref/offset/","title":"offset","text":"Input Stack:DurationTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Warning

    Note that there is a deprecated List[Duration] variant that only modifies the presentation at the end. It cannot be used along with math operations.

    Shift the time frame to use when fetching the data. This is used to look at a previous interval as a point of reference, e.g., day-over-day or week-over-week. Offset cannot be used with streaming execution of the query, consider using the delay operator for short intervals to detect a change.

    Examples:

    BeforeAfterCombined
    name,sps,:eq,\n(,name,),:by\n
    name,sps,:eq,\n(,name,),:by,\n1w,:offset\n
    name,sps,:eq,\n(,name,),:by,\n:dup,\n1w,:offset\n
    BeforeAfterCombined
    name,sps,:eq,\n(,name,),:by\n
    name,sps,:eq,\n(,name,),:by,\nPT1H,:offset\n
    name,sps,:eq,\n(,name,),:by,\n:dup,\nPT1H,:offset\n
    "},{"location":"asl/ref/or/","title":"or","text":"

    There are two variants of the :or operator.

    "},{"location":"asl/ref/or/#choosing","title":"Choosing","text":"Input Stack:q2: Queryq1: Query \u21e8 Output Stack:(q1 OR q2): Query

    This first variant is used for choosing the set of time series to operate on. It is a binary operator that matches if either of the sub-queries match. For example, consider the following query:

    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:or\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/or/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 OR ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a OR b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a OR b 00:01 0.0 0.0 0.0 00:01 0.0 1.0 1.0 00:02 1.0 0.0 1.0 00:03 1.0 1.0 1.0 00:04 0.5 1.7 1.0

    The result will be a signal time series that will be 1.0 for all intervals where the corresponding values of a or b are non-zero. Example:

    BeforeAfter
    minuteOfDay,:time,\n:dup,\n300,:gt,\n:swap,\n290,:lt\n
    minuteOfDay,:time,\n:dup,\n300,:gt,\n:swap,\n290,:lt,\n:or\n
    "},{"location":"asl/ref/order/","title":"order","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Order to use for sorting results. Supported values are asc and desc for ascending and descending order respectively. Default is asc.

    Since: 1.5

    Examples:

    SortedDefault
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\nmax,:sort,\nasc,:order\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\ndesc,:order\n
    "},{"location":"asl/ref/over/","title":"over","text":"Input Stack:ba \u21e8 Output Stack:aba

    Copy the item in the second position on the stack to the top.

    Example:

    a,b,:over\n
    PosInputOutput 0 b a 1 a b 2 a"},{"location":"asl/ref/palette/","title":"palette","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Set the palette to use for the results of an expression. This operator allows scoping a palette to a particular group by instead of to all lines that share the same axis. A common use-case is to have multiple stacked group by expressions using different palettes. For example, suppose I want to create a graph showing overall request per second hitting my services with successful requests shown in shades of green and errors in shades of red. This can make it easy to visually see if a change is due to an increase in errors:

    Or a spike in successful requests:

    Examples:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\nreds,:palette\n
    BeforeAfter
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\nreds,:palette\n
    "},{"location":"asl/ref/pct/","title":"pct","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Shorthand equivalent to writing: :dup,:dup,:sum,:div,100,:mul,pct,:named-rewrite The percent contribution of an individual time series to a group.

    Example:

    name,sps,:eq,\n(,nf.cluster,),:by,\n:pct\n
    BeforeAfterStack to 100%
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:pct\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:pct,\n:stack\n
    "},{"location":"asl/ref/per-step/","title":"per-step","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Converts a line from a rate per second to a rate based on the step size of the graph. This is useful for getting an estimate of the raw number of events for a given interval.

    064-64
    0,:per-step\n
    64,:per-step\n
    -64,:per-step\n
    "},{"location":"asl/ref/percentiles-heatmap/","title":"percentiles-heatmap","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Since 1.8.

    Group the metric by the percentile tag and plot the data as a heatmap. Requires that the metric be recorded as a percentile.

    See heatmap for more information.

    Shorthand equivalent of writing (,percentile,),:by,:heatmap

    Example:

    Default
    name,requestLatency,:eq,\n:percentiles-heatmap\n
    "},{"location":"asl/ref/percentiles/","title":"percentiles","text":"Input Stack:percentiles: ListQuery \u21e8 Output Stack:TimeSeriesExpr

    Estimate percentiles for a timer or distribution summary. The data must have been published appropriately to allow the approximation. If using spectator, then see PercentileTimer and PercentileDistributionSummary helper classes.

    The percentile values can be shown in the legend using $percentile.

    Since: 1.5.0 (first in 1.5.0-rc.4)

    BeforeAfter
    name,requestLatency,:eq\n
    name,requestLatency,:eq,\n(,25,50,90,),:percentiles\n
    "},{"location":"asl/ref/pick/","title":"pick","text":"Input Stack:Na0...aN \u21e8 Output Stack:aN-1a0...aN

    Pick an item in the stack and put a copy on the top.

    Since: 1.5.0

    Example:

    a,0,:pick\n
    PosInputOutput 0 0 a 1 a a
    a,b,0,:pick\n
    PosInputOutput 0 0 b 1 b b 2 a a
    a,b,1,:pick\n
    PosInputOutput 0 1 a 1 b b 2 a a"},{"location":"asl/ref/pow/","title":"pow","text":"Input Stack:TimeSeriesExprTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute a new time series where each interval has the value (a power b) where a and b are the corresponding intervals in the input time series.

    Examples:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n42,:pow\n
    BeforeAfter
    name,sps,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,sps,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:pow\n
    "},{"location":"asl/ref/random/","title":"random","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Generate a time series that appears to be random noise for the purposes of experimentation and generating sample data. To ensure that the line is deterministic and reproducible it actually is based on a hash of the timestamp. Each datapoint is a value between 0.0 and 1.0.

    Random
    :random\n
    "},{"location":"asl/ref/re/","title":"re","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k=~/^v/): Query

    Warning

    Regular expressions can be expensive to check and should be avoided if possible. When designing data to publish ensure that common query patterns would not need the use of regular expressions.

    Select time series where the value for a key matches the specified regular expression. For example, consider the following query:

    name,ssCpu,:re\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456

    The regular expression value will be automatically anchored at the start and the matching is case sensitive. Always try to have a simple prefix on the expression to allow for more efficient matching of the expression. For more information on supported patterns, see the Java regular expressions documentation.

    "},{"location":"asl/ref/reic/","title":"reic","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k=~/^v/i): Query

    Warning

    Ignoring the case will always result in a full scan for the key. This should be used sparingly and only for tag queries. If a case-insensitive match is not required, use :re instead.

    Select time series where the value for a key matches the specified regular expression with case insensitive matching. For example, consider the following query:

    name,ssCPU,:reic\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456

    Notice that the casing for the query does not match the data. The regular expression value will be automatically anchored at the start. For more information on supported patterns, see the Java regular expressions documentation.

    "},{"location":"asl/ref/roll/","title":"roll","text":"Input Stack:Na0...aN \u21e8 Output Stack:aN-1a0...aN-2aN

    Rotate an item in the stack and put it on the top.

    Since: 1.5.0

    Example:

    a,0,:roll\n
    PosInputOutput 0 0 a 1 a
    a,b,0,:roll\n
    PosInputOutput 0 0 b 1 b a 2 a
    a,b,1,:roll\n
    PosInputOutput 0 1 a 1 b b 2 a"},{"location":"asl/ref/rolling-count/","title":"rolling-count","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Number of occurrences within a specified window. This operation is frequently used in alerting expressions to reduce noise. For example:

    # Check to see if average cpu usage is > 80%\nname,cpuUser,:eq,:avg,80,:gt,\n\n# Only alert if that is true for more than 3 of the last 5\n# datapoints\n5,:rolling-count,3,:gt\n

    A value is counted if it is non-zero. Missing values, NaN, will be treated as zeroes. For example:

    Input 3,:rolling-count 0 0 1 1 -1 2 NaN 2 0 1 1 1 1 2 1 3 1 3 0 2

    The window size, n, is the number of datapoints to consider including the current value. Note that it is based on datapoints not a specific amount of time. As a result the number of occurrences will be reduced when transitioning to a larger time frame that causes consolidation.

    BeforeAfter
    :random,\n0.4,:gt\n
    :random,\n0.4,:gt,\n5,:rolling-count\n
    "},{"location":"asl/ref/rolling-max/","title":"rolling-max","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Maximum value within a specified window. This operation can be used in alerting expressions to find an upper bound for noisy data based on recent samples. For example:

    name,sps,:eq,:sum,\n:dup,\n5,:rolling-max\n

    Missing values, NaN, will be ignored when computing the max. If all values within the window are NaN, then NaN will be emitted. For example:

    Input 3,:rolling-max 0 0 1 1 -1 1 NaN 1 0 0 1 1 1 1 1 1 1 1 0 1

    The window size, n, is the number of datapoints to consider including the current value. Note that it is based on datapoints not a specific amount of time. As a result the number of occurrences will be reduced when transitioning to a larger time frame that causes consolidation.

    Since: 1.6

    BeforeAfter
    :random,\n0.4,:gt\n
    :random,\n0.4,:gt,\n5,:rolling-max\n
    "},{"location":"asl/ref/rolling-mean/","title":"rolling-mean","text":"Input Stack:minNumValues: Intn: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Mean of the values within a specified window. The mean will only be emitted if there are at least a minimum number of actual values (not NaN) within the window. Otherwise NaN will be emitted for that time period.

    Input 3,2,:rolling-mean 0 NaN 1 0.5 -1 0.0 NaN 0.0 NaN NaN 0 NaN 1 0.5 1 0.667 1 1 0 0.667

    The window size, n, is the number of datapoints to consider including the current value. There must be at least minNumValues non-NaN values within that window before it will emit a mean. Note that it is based on datapoints, not a specific amount of time. As a result the number of occurrences will be reduced when transitioning to a larger time frame that causes consolidation.

    Since: 1.6

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n5,3,:rolling-mean\n
    "},{"location":"asl/ref/rolling-min/","title":"rolling-min","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Minimum value within a specified window. This operation can be used in alerting expressions to find a lower bound for noisy data based on recent samples. For example:

    name,sps,:eq,:sum,\n:dup,\n5,:rolling-min\n

    Missing values, NaN, will be ignored when computing the min. If all values within the window are NaN, then NaN will be emitted. For example:

    Input 3,:rolling-min 0 0 1 0 -1 -1 NaN -1 0 -1 1 0 1 0 1 1 1 1 0 0

    The window size, n, is the number of datapoints to consider including the current value. Note that it is based on datapoints not a specific amount of time. As a result the number of occurrences will be reduced when transitioning to a larger time frame that causes consolidation.

    Since: 1.6

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n5,:rolling-min\n
    "},{"location":"asl/ref/rolling-sum/","title":"rolling-sum","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Sum of the values within a specified window.

    Input 3,:rolling-sum 0 0.0 1 1.0 -1 0.0 NaN 0.0 NaN -1.0 NaN NaN 1 1.0 1 2.0 1 3.0 0 2.0

    The window size, n, is the number of datapoints to consider including the current value. Note that it is based on datapoints, not a specific amount of time. As a result the number of occurrences will be reduced when transitioning to a larger time frame that causes consolidation.

    Since: 1.6

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n5,:rolling-sum\n
    "},{"location":"asl/ref/rot/","title":"rot","text":"Input Stack:b...a \u21e8 Output Stack:ab...

    Rotate the stack so that the item at the bottom is now at the top.

    Example:

    a,b,c,d,:rot\n
    PosInputOutput 0 d a 1 c d 2 b c 3 a b"},{"location":"asl/ref/s/","title":"s","text":"Input Stack:replacement: StringsearchPattern: StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Perform a search and replace on the legend strings. This command is similar to the global search and replace (s/regexp/replace/g) operation from tools like vim or sed.

    The replacement string can use variables to refer to the capture groups of the input expression. The syntax is the same as for legends.

    Since: 1.6

    Examples:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend,\n^nccp-(.*)$,$1,:s\n
    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend,\n^nccp-(?.*)$,$stack,:s\n\n\n\nBeforeAfter\n\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend,\nnccp-,_,:s\n
    \n\n\nBeforeAfter\n\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend,\n([a-z]),_$1,:s\n
    "},{"location":"asl/ref/sdes-fast/","title":"sdes-fast","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing sliding DES using settings to quickly adjust to the input line. See recommended values for more information.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:sdes-fast\n
    "},{"location":"asl/ref/sdes-simple/","title":"sdes-simple","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Shorthand equivalent to writing: :dup,10,0.1,0.5,:sdes,sdes-simple,:named-rewrite

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:sdes-simple\n
    "},{"location":"asl/ref/sdes-slow/","title":"sdes-slow","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing sliding DES using settings to slowly adjust to the input line. See recommended values for more information.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:sdes-slow\n
    "},{"location":"asl/ref/sdes-slower/","title":"sdes-slower","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing sliding DES using settings to slowly adjust to the input line. See recommended values for more information.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:sdes-slower\n
    "},{"location":"asl/ref/sdes/","title":"sdes","text":"Input Stack:beta: Doublealpha: Doubletraining: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Variant of :des that is deterministic as long as the step size does not change. One of the common complaints with DES is that to get the same value for a given time you must start feeding in data at exactly the same time. So for normal graphs where it is computed using the window of the chart it will have slightly different predictions for a given time. As it is often used for alerting this makes it cumbersome to try and determine:

    1. Why an alarm fired
    2. When alarms would have fired for tuning

    Sliding DES uses two DES functions and alternates between them. One will get trained while the other is getting used, and then the one that was getting used will get reset and the roles swapped.

     F1 | A |-- T1 --|-- P1 --|-- T1 --|-- P1 --|-- T1 --|\n F2 | A |        |-- T2 --|-- P2 --|-- T2 --|-- P2 --|\n\nResult:\n\n R  |-- NaN -----|-- P1 --|-- P2 --|-- P1 --|-- P2 --|\n

    Both functions will ignore any data until it reaches a boundary, even multiple, of the training window. That is shown as A in the diagram above. The first function will then start training, T1, and after the training window the first predicted values, P1, will get generated. The output line will alternate between the predictions from both DES functions.

    The alternation between functions can cause the prediction line to look choppier than DES, e.g., on a gradual drop:

    Further, since each prediction only considers data for a narrow window it will adjust to sharp changes faster. For example:

    Since: 1.5.0

    BeforeAfter
    name,requestsPerSecond,:eq,\n:sum,\n:per-step\n
    name,requestsPerSecond,:eq,\n:sum,\n5,0.1,0.5,:sdes\n
    "},{"location":"asl/ref/set/","title":"set","text":"Input Stack:vk \u21e8 Output Stack:

    Set the value of a variable.

    Example:

    k,v,:set\n
    PosInputOutput 0 v 1 k"},{"location":"asl/ref/sort/","title":"sort","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Sort the results of an expression in the legend by one of the summary statistics or by the legend text. The default behavior is to sort by the legend text. This will sort in ascending order by default, for descending order use order.

    Since: 1.5

    Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\nmax,:sort\n
    "},{"location":"asl/ref/sqrt/","title":"sqrt","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute a new time series where each interval has the square root of the value from the input time series.

    064-64
    0,:sqrt\n
    64,:sqrt\n
    -64,:sqrt\n
    "},{"location":"asl/ref/srandom/","title":"srandom","text":"Input Stack:seed: Int \u21e8 Output Stack:TimeSeriesExpr

    Generate a time series that appears to be random noise for the purposes of experimentation and generating sample data. To ensure that the line is deterministic and reproducible it actually is based on a hash of the timestamp. The seed value is used to vary the values for the purposes of creating multiple different sample lines. Each datapoint is a value between 0.0 and 1.0.

    Example:

    Seeded Random: /api/v1/graph?w=200&h=125&s=e-3h&e=2012-01-01T07:00&tz=UTC&q=42,:srandom @@@"},{"location":"asl/ref/sset/","title":"sset","text":"Input Stack:kv \u21e8 Output Stack:

    Shorthand equivalent to writing: :swap,:set

    Example:

    a,b,:sset\n
    PosInputOutput 0 b 1 a"},{"location":"asl/ref/stack/","title":"stack","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Change the line style to be stack. In this mode the line will be filled to the previous stacked line on the same axis.

    See the line style examples page for more information.

    Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stack\n
    "},{"location":"asl/ref/starts/","title":"starts","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:Query

    Select time series where the value for a key has the specified prefix. For example, consider the following query:

    name,ssCpu,:starts\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/stat-avg-mf/","title":"stat-avg-mf","text":"

    Warning

    Deprecated: use :stat instead.

    Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Equivalent to avg,:stat. Example of usage:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:stat-avg-mf\n
    "},{"location":"asl/ref/stat-avg/","title":"stat-avg","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the avg,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines that have an average value greater than 5k and less than 20k:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-avg,\n5e3,:gt,\n:stat-avg,\n20e3,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/stat-count/","title":"stat-count","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the count,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines where the count value is greater than 50:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-count,\n50,:gt,\n:filter\n
    "},{"location":"asl/ref/stat-last/","title":"stat-last","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the last,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines where the last value is greater than 5k and less than 20k:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-last,\n5e3,:gt,\n:stat-last,\n20e3,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/stat-max-mf/","title":"stat-max-mf","text":"

    Warning

    Deprecated: use :stat instead.

    Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Equivalent to max,:stat. Example of usage:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:stat-max-mf\n
    "},{"location":"asl/ref/stat-max/","title":"stat-max","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the max,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines that have a maximum value greater than 5k and less than 20k:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-max,\n5e3,:gt,\n:stat-max,\n20e3,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/stat-min-mf/","title":"stat-min-mf","text":"

    Warning

    Deprecated: use :stat instead.

    Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Equivalent to min,:stat. Example of usage:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:stat-min-mf\n
    "},{"location":"asl/ref/stat-min/","title":"stat-min","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the min,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines that have a minimum value greater than 5k and less than 20k:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-min,\n5e3,:gt,\n:stat-min,\n20e3,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/stat-total/","title":"stat-total","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the total,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines where the sum of all data points for the line is greater than 1M and less than 4M:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-total,\n1e6,:gt,\n:stat-total,\n4e6,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/stat/","title":"stat","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Create a summary time series showing the value of the specified summary statistic for the data points of the input time series. Valid statistic values are avg, count, max, min, last, and total. The graph below shows avg, max, min, and last for a simple input time series:

    The count is the number of data points for the time series. In the example above, that is five since the last value is NaN. The total is the sum of the data points for the time series.

    The most common usage of stats is in conjunction with :filter to restrict the set of results for grouped expression. When filtering, helper macros, :stat-$(name), can be used to represent applying the statistic to the input time series being filtered without explicitly repeating the input expression.

    Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\navg,:stat\n
    "},{"location":"asl/ref/stddev/","title":"stddev","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute the standard deviation for the results of a group by. If the underlying data is for a timer or distribution summary, then dist-stddev is likely a better choice.

    Since: 1.6

    Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stddev\n
    "},{"location":"asl/ref/sub/","title":"sub","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 - ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a subtractNaN b) where a and b are the corresponding intervals in the input time series.

    :sub 1.0 0.0 1.0 1.0 NaN Input 1 2.0 0.0 1.0 1.0 NaN Input 2 1.0 0.0 0.0 NaN NaN

    Use the fsub operator to get strict floating point behavior.

    Example subtracting a constant:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n30e3,:sub\n

    Example subtracting two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:sub\n
    "},{"location":"asl/ref/sum/","title":"sum","text":"

    Sum aggregation operator. There are two variants of the :sum operator.

    "},{"location":"asl/ref/sum/#aggregation","title":"Aggregation","text":"Input Stack:Query \u21e8 Output Stack:AggregationFunction

    Compute the sum of all the time series that match the query. Sum is the default aggregate used if a query is specified with no explicit aggregate function. Example with implicit sum:

    name,ssCpuUser,:eq\n

    Equivalent example with explicit sum:

    name,ssCpuUser,:eq,\n:sum\n

    When matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The values from the corresponding intervals will be aggregated. For the first interval using the sample data above the values are 1.0, 8.0, and 1.0. Each value other than NaN contributes one to the sum. This leads to a final result of:

    NameData ssCpuUser [10.0, 11.0, 8.0]

    The only tags for the aggregated result are those that are matched exactly (:eq clause) as part of the choosing criteria or are included in a group by.

    "},{"location":"asl/ref/sum/#math","title":"Math","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute the sum of all the time series from the input expression. This is typically used when there is a need to use some other aggregation for the grouping. Example:

    BeforeAfter
    name,sps,:eq,\n:max,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:max,\n(,nf.cluster,),:by,\n:sum\n
    "},{"location":"asl/ref/swap/","title":"swap","text":"Input Stack:ba \u21e8 Output Stack:ab

    Swap the top two items on the stack.

    Example:

    a,b,:swap\n
    PosInputOutput 0 b a 1 a b"},{"location":"asl/ref/time-span/","title":"time-span","text":"Input Stack:e: Strings: String \u21e8 Output Stack:TimeSeriesExpr

    Generates a signal line based on the specified time range. The line will be 1 within the range and 0 for all other times. The format of the start and end times is the same as the start and end time parameters on the Graph API. If the time zone is not explicitly specified, then the value from the tz variable will get used. The default value for the tz variable is the primary time zone used for the graph.

    The following named times are supported for time spans:

    Name Description gs Graph start time. ge Graph end time. s Start time for the span, can only be used for the end time. e End time for the span, can only be used for the start time. now Current time. epoch January 1, 1970 UTC.

    Since: 1.6

    Example:

    RelativeAbsolute
    e-30m,ge,:time-span\n
    2014-02-20T13:00,s%2B30m,:time-span\n
    "},{"location":"asl/ref/time/","title":"time","text":"Input Stack:String \u21e8 Output Stack:TimeSeriesExpr

    Generates a line based on the current time. Supported modes are:

    • secondOfMinute
    • secondOfDay
    • minuteOfHour
    • minuteOfDay
    • hourOfDay
    • dayOfWeek
    • dayOfMonth
    • dayOfYear
    • monthOfYear
    • yearOfCentury
    • yearOfEra
    • seconds (since epoch)
    • days (since epoch)

    The mode can also be a value of the enum ChronoField.

    Examples:

    Hour of DayEnum
    hourOfDay,:time\n
    HOUR_OF_DAY,:time\n
    "},{"location":"asl/ref/topk-others-avg/","title":"topk-others-avg","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the largest value for the specified summary statistic and computes an average aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:topk-others-avg\n
    "},{"location":"asl/ref/topk-others-max/","title":"topk-others-max","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the largest value for the specified summary statistic and computes a max aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:topk-others-max\n
    "},{"location":"asl/ref/topk-others-min/","title":"topk-others-min","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the largest value for the specified summary statistic and computes a min aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:topk-others-min\n
    "},{"location":"asl/ref/topk-others-sum/","title":"topk-others-sum","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the largest value for the specified summary statistic and computes a sum aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:topk-others-sum\n
    "},{"location":"asl/ref/topk/","title":"topk","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the largest value for the specified summary statistic. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:topk\n

    In some cases it can be useful to see an aggregate summary of the other time series that were not part of the top set. This can be accomplished using the :topk-others-$(aggr) operators. For more details see:

    • :topk-others-avg
    • :topk-others-max
    • :topk-others-min
    • :topk-others-sum
    "},{"location":"asl/ref/trend/","title":"trend","text":"Input Stack:window: DurationTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Warning

    Deprecated: Use :rolling-mean instead.

    Computes a moving average over the input window. Until there is at least one sample for the whole window it will emit NaN. If the input line has NaN values, then they will be treated as zeros. Example:

    Input 2m,:trend 5m,:trend 0 NaN NaN 1 0.5 NaN -1 0.0 NaN NaN -0.5 NaN 0 0.0 0.0 1 0.5 0.2 2 1.5 0.4 1 1.5 0.8 1 1.0 1.0 0 0.5 1.0

    The window size is specified as a range of time. If the window size is not evenly divisible by the step size, then the window size will be rounded down. So a 5m window with a 2m step would result in a 4m window with two datapoints per average. A step size larger than the window will result in the trend being a no-op.

    Examples:

    5 Minutes20 Minutes
    :random,\nPT5M,:trend\n
    :random,\n20m,:trend\n
    "},{"location":"asl/ref/true/","title":"true","text":"Input Stack: \u21e8 Output Stack:Query

    Query expression that will match any input time series. See also :false.

    "},{"location":"asl/ref/tuck/","title":"tuck","text":"Input Stack:ba \u21e8 Output Stack:bab

    Shorthand equivalent to writing: :swap,:over

    Example:

    a,b,:tuck\n
    PosInputOutput 0 b b 1 a a 2 b"},{"location":"asl/ref/vspan/","title":"vspan","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Change the line style to be a vertical span. In this mode any non-zero datapoints on the line will be shown as a span. This is frequently used to visualize when an alert would have fired.

    See the line style examples page for more information.

    Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n:dup,\n20e3,:gt\n
    name,sps,:eq,\n:sum,\n:dup,\n20e3,:gt,\n:vspan\n
    "},{"location":"concepts/consolidation/","title":"Consolidation","text":"

    TODO

    "},{"location":"concepts/naming/","title":"Naming","text":""},{"location":"concepts/naming/#summary","title":"Summary","text":"
    1. Names
      • Describe the measurement being collected
      • Use camelCase
      • Static
      • Succinct
    2. Tags
      • Should be used for dimensional filtering
      • Be careful about combinatorial explosion
      • Tag keys should be static
      • Use id to distinguish between instances
    3. Use Base Units
    "},{"location":"concepts/naming/#names","title":"Names","text":""},{"location":"concepts/naming/#describe-the-measurement","title":"Describe the Measurement","text":""},{"location":"concepts/naming/#use-camelcase","title":"Use camelCase","text":"

    The main goal here is to promote consistency, which makes it easier for users. The choice of style is somewhat arbitrary, but camelCase was chosen because:

    • Used by SNMP
    • Used by Java
    • It was commonly used at Netflix when the guideline was written

    The exception to this rule is where there is an established common case. For example, with Amazon regions, it is preferred to use us-east-1 rather than usEast1 as it is the more common form.

    "},{"location":"concepts/naming/#static","title":"Static","text":"

    There should not be any dynamic content in a metric name, such as requests.$APP_NAME. Metric names and tag keys are how users interact with the data, and dynamic values make them difficult to use. Dynamic information is better suited for tag values, such as nf.app or status.

    "},{"location":"concepts/naming/#succinct","title":"Succinct","text":"

    Long names should be avoided. In many cases, long names are the result of combining many pieces of information together into a single string. In this case, consider either discarding information that is not useful or encoding the information in tag values.

    "},{"location":"concepts/naming/#tags","title":"Tags","text":"

    Historically, tags have been used to play one of two roles:

    • Dimensions. This is the primary use of tags and this feature allows the data to be filtered into subsets by values of interest.
    • Namespace. Similar to packages in Java, this allows grouping related data. This type of usage is discouraged.

    As a general rule, it should be possible to use the name as a pivot. If only the name is selected, then the user should be able to use other dimensions to filter the data and successfully reason about the value being shown.

    As a concrete example, suppose we have two metrics:

    1. The number of threads currently in a thread pool.
    2. The number of rows in a database table.
    "},{"location":"concepts/naming/#discouraged-approach","title":"Discouraged Approach","text":"
    Id poolSize = registry.createId(\"size\")\n  .withTag(\"class\", \"ThreadPool\")\n  .withTag(\"id\", \"server-requests\");\n\nId poolSize = registry.createId(\"size\")\n  .withTag(\"class\", \"Database\")\n  .withTag(\"table\", \"users\");  \n

    In this approach, if you select the name size, then it will match both the ThreadPool and Database classes. This results in a value that is an aggregate of the number of threads and the number of items in a database, which has no meaning.

    "},{"location":"concepts/naming/#recommended-approach","title":"Recommended Approach","text":"
    Id poolSize = registry.createId(\"threadpool.size\")\n  .withTag(\"id\", \"server-requests\");\n\nId poolSize = registry.createId(\"db.size\")\n  .withTag(\"table\", \"users\");  \n

    This variation provides enough context, so that if just the name is selected, the value can be reasoned about and is at least potentially meaningful.

    This variation provides enough context in the name so that the meaning is more apparent and you can successfully reason about the values. For example, if you select threadpool.size, then you can see the total number of threads in all pools. You can then group by or select an id to further filter the data to a subset in which you have an interest.

    "},{"location":"concepts/naming/#use-base-units","title":"Use Base Units","text":"

    Keep measurements in base units where possible. It is better to have all timers in seconds, disk sizes in bytes, and network rates in bytes/second. This allows any SI unit prefixes applied to tick labels on a graph to have an obvious meaning, such as 1k meaning 1 kilobyte, as opposed to 1 kilo-megabyte.

    "},{"location":"concepts/normalization/","title":"Normalization","text":"

    In Atlas, this usually refers to normalizing data points to step boundaries. Suppose that values are actually getting reported at 30 seconds after the minute, instead of exactly on the minute. The values will get normalized to the minute boundary, so that all time series in the system are consistent.

    How a normalized value is computed depends on the data source type. Atlas supports three types indicated by the value of the atlas.dstype tag. In general, you should not need to worry about that; client libraries like Spectator will automatically handle tagging based on the data source type.

    It is recommended to at least skim through the normalization for gauges and rates to better understand how the values you see actually relate to measured data.

    "},{"location":"concepts/normalization/#gauge","title":"Gauge","text":"

    A value that is sampled from some source and the value is used as is. The last value received will be the value used for the interval. For example:

                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510                                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502    8    \u2502                                    \u2502    8    \u2502\n                \u2502         \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500                             \u2502         \u2502\n                \u2502         \u2502    6                               \u2502         \u2502\n\u2500\u2500\u2500\u2500\u2500\u2500\u2510         \u2502         \u2502                \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510         \u2502         \u2502\n 4    \u2502         \u2502         \u2502                \u2502    4    \u2502         \u2502         \u2502\n      \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502           to   \u2502         \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502\n      \u2502    2    \u2502         \u2502                \u2502         \u2502    2    \u2502         \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524           \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n1:00      1:01      1:02      1:03        1:00      1:01      1:02      1:03\n
    "},{"location":"concepts/normalization/#rate","title":"Rate","text":"

    A rate is a value representing the rate per second since the last reported value. Rate values are normalized using a weighted average. For example:

                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502    8    \u2502                                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502         \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500                             \u2502    7    \u2502\n                \u2502         \u2502    6                     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502\n\u2500\u2500\u2500\u2500\u2500\u2500\u2510         \u2502         \u2502                          \u2502    5    \u2502         \u2502\n 4    \u2502         \u2502         \u2502                \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502         \u2502\n      \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502           to   \u2502    3    \u2502         \u2502         \u2502\n      \u2502    2    \u2502         \u2502                \u2502         \u2502         \u2502         \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524           \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n1:00      1:01      1:02      1:03        1:00      1:01      1:02      1:03\n

    Here, the data is reported at exactly 30s after the minute boundary. So each value represents the average rate per second for 50% of the minute.

    Time Value 1:01 4 * 0.5 + 2 * 0.5 = 2 + 1 = 3 1:02 2 * 0.5 + 8 * 0.5 = 1 + 4 = 5 1:03 8 * 0.5 + 6 * 0.5 = 4 + 3 = 7

    If many samples are received for a given interval, then they will each be weighted based on the fraction of the interval they represent. When no previous sample exists, the value will be treated as the average rate per second over the previous step. This behavior is important to avoid under-counting the contribution from a previous interval. The example below shows what happens if there is no previous or next sample:

                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502    8    \u2502\n                \u2502         \u2502\n                \u2502         \u2502                          \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502         \u2502                          \u2502    5    \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502         \u2502                          \u2502         \u2502    4    \u2502\n      \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502           to        1    \u2502         \u2502         \u2502\n      \u2502    2    \u2502         \u2502                \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502         \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524           \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n1:00      1:01      1:02      1:03        1:00      1:01      1:02      1:03\n

    Why perform weighted averaging for rates instead of the simpler last value approach used with gauges? Because it gives us a better summary of what we actually know from the measurements received. In practical terms:

    • Avoids dropping information if samples are more frequent than the step. Suppose we have a 1 minute step, but data is actually getting reported every 10s. For this example, assume we get 1, 5, 90, 5, 4, and 2. The last value normalization used with Gauges would end up with a value of 2. The rate normalization will give 17.833. Each value is a rate per second, so if you take the (1 + 5 + 90 + 5 + 4 + 2) * 10 = 1070 actual events measured during the interval. That is equivalent to 17.833 * 60 indicating we have an accurate average rate for the step size.
    • Avoids skewing the data causing misleading spikes or drops in the aggregates. Using Atlas you will typically be looking at an aggregate of time series rather than an individual time series that was reported. With last value it can have the effect of skewing samples to a later interval. Suppose the client is reporting once a minute at 5s after the minute. That value indicates more about the previous interval than it does the current one. During traffic transitions, such as moving traffic over to a new cluster or even some auto-scaling events, differences in this skew can result in the appearance of a drop because there will be many new time series getting reported with a delayed start. For existing time series it is still skewed, but tends to be less noticeable. The weighted averaging avoids these problems for the most part.
    "},{"location":"concepts/normalization/#counter","title":"Counter","text":"

    Counter is similar to rate, except that the value reported is monotonically increasing and will be converted to a rate by the backend. The conversion is done by computing the delta between the current sample and the previous sample and dividing by the time between the samples. After that it is the same as a rate.

    Note that unless the input is a monotonically increasing counter, it is generally better to have the client perform rate conversion. Since the starting value is unknown, at least two samples must be received before the first delta can be computed. This means that new time series relying on counter type will be delayed by one interval.

    "},{"location":"concepts/time-series/","title":"Time Series","text":"

    A time series is a sequence of data points reported at a consistent interval over time. The time interval between successive data points is called the step size. In Atlas, each time series is paired with metadata called tags that allow us to query and group the data.

    "},{"location":"concepts/time-series/#tags","title":"Tags","text":"

    A set of key value pairs associated with a time series. Each time series must have at least one tag with a key of name. To make it more concrete, here is an example of a tag set represented as a JSON object:

    {\n  \"name\":       \"server.requestCount\",\n  \"status\":     \"200\",\n  \"endpoint\":   \"api\",\n  \"nf.app\":     \"fooserver\",\n  \"nf.cluster\": \"fooserver-main\",\n  \"nf.stack\":   \"main\",\n  \"nf.region\":  \"us-east-1\",\n  \"nf.zone\":    \"us-east-1c\",\n  \"nf.node\":    \"i-12345678\"\n}\n

    Usage of tags typically falls into two categories:

    1. Namespace. These are tags necessary to qualify a name, so that it can be meaningfully aggregated. Using the sample above, consider computing the sum of all metrics for application fooserver. That number would be meaningless. Properly modelled data should try to make the aggregates meaningful by selecting the name. The sum of all metrics with name = server.requestCount is the overall request count for the service.
    2. Dimensions. These are tags used to filter the data to a meaningful subset. They can be used to see the number of successful requests across the cluster by querying for status = 200 or the number of requests for a single node by querying for nf.node = i-12345678. Most tags should fall into this category.

    When creating metrics, it is important to carefully think about how the data should be tagged. See the naming docs for more information.

    "},{"location":"concepts/time-series/#metric","title":"Metric","text":"

    A metric is a specific quantity being measured, e.g., the number of requests received by a server. In casual language about Atlas, metric is often used interchangeably with time series. A time series is one way to track a metric and is the method supported by Atlas. In most cases there will be many time series for a given metric. Going back to the example, request count would usually be tagged with additional dimensions such as status and node. There is one time series for each distinct combination of tags, but conceptually it is the same metric.

    "},{"location":"concepts/time-series/#data-point","title":"Data Point","text":"

    A data point is a triple consisting of tags, timestamp, and a value. It is important to understand at a high level how data points correlate with the measurement. Consider requests hitting a server; this would typically be measured using a counter. Each time a request is received the counter is incremented. There is not one data point per increment; instead, a data point represents the behavior over a span of time called the step size. The client library will sample the counter once for each interval and report a single value.

    Suppose that each circle in the diagram below represents a request:

    1:00       1:01       1:02       1:03\n \u251c\u2500\u25cf\u2500\u2500\u2500\u2500\u25cf\u25cf\u25cf\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n

    There are 5 requests shown, 4 from 1:00 to 1:01, and 1 from 1:02 to 1:03. Assuming all requests incremented the same time series, i.e. all other dimensions such as status code are the same, then this would result in three data points. For counters, values are always a rate per second, so for a one minute step size the total number of requests would be divided by 60 seconds. So the values stored would be:

    Time Value 1:01 4 / 60 = 0.0667 1:02 0 / 60 = 0.0000 1:03 1 / 60 = 0.0167"},{"location":"concepts/time-series/#step-size","title":"Step Size","text":"

    The amount of time between two successive data points in a time series. For Atlas the datapoints will always be on even boundaries of the step size. If data is not reported on step boundaries, it will get normalized to the boundary.

    "},{"location":"spectator/","title":"Overview","text":"

    Simple library for instrumenting code to record dimensional time series data.

    At a minimum, you need to:

    1. Understand core concepts.

      • Time Series
      • Normalization
      • Naming
      • Clock
    2. Install the metrics agent.

      • SpectatorD
    3. Install the language-specific library and configuration bindings, where available.

      • Support Class Descriptions
        • Language Overview
      • First-Class Support
        • C++
        • Go
        • Java
        • Node.js
        • Python
      • Best-Effort Support
        • Rust (internal library)
    4. Instrument some code, referring to the core usage guides on the following meter types:

      • Counters
      • Distribution Summaries
      • Gauges
      • Percentile Timers
      • Timers

    After you are more familiar with the library and need assistance with more advanced topics, see the Patterns section on the left.

    "},{"location":"spectator/agent/metrics/","title":"SpectatorD Metrics","text":""},{"location":"spectator/agent/metrics/#spectatormeasurements","title":"spectator.measurements","text":"

    The number of measurements that have either been sent to an Atlas backend or dropped.

    Unit: measurements/second

    Dimensions:

    • id: One of sent or dropped.
    • error: The type of error that occurred, one of http-error, validation, or other.
    • owner: spectatord
    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatorregistrysize","title":"spectator.registrySize","text":"

    The number of measurements stored in the registry.

    Unit: measurements

    Dimensions:

    • owner: spectatord
    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordparsedcount","title":"spectatord.parsedCount","text":"

    The number of input lines parsed.

    Unit: lines/second

    Dimensions:

    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordparseerrors","title":"spectatord.parseErrors","text":"

    The number of errors that have occurred while parsing input lines.

    Unit: lines/second

    Dimensions:

    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordpercentilecachesize","title":"spectatord.percentileCacheSize","text":"

    The number of Distribution Summaries and/or Percentile Timers that have been updated recently in the dedicated cache.

    Unit: meters

    Dimensions:

    • id: One of dist-summary or timer.
    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordpercentileexpired","title":"spectatord.percentileExpired","text":"

    The number of Distribution Summaries and/or Percentile Timers that have been expired from the dedicated cache.

    Unit: meters/second

    Dimensions:

    • id: One of dist-summary or timer.
    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordpoolallocsize","title":"spectatord.poolAllocSize","text":"

    The size of the internal string pool.

    Unit: bytes

    Dimensions:

    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordpoolentries","title":"spectatord.poolEntries","text":"

    The number of entries in the internal string pool.

    Unit: entries

    "},{"location":"spectator/agent/usage/","title":"Usage","text":""},{"location":"spectator/agent/usage/#spectatord-introduction","title":"SpectatorD Introduction","text":"

    SpectatorD is a high-performance telemetry agent that listens for metrics specified by a text-based protocol and publishes updates periodically to an Atlas aggregator service. It consolidates the logic required to apply common tagging to all metrics received, maintain metric lifetimes, and route metrics to the correct backend.

    The preferred method of using spectatord is to use one of the thin-client implementations; however, the text-based protocol was designed to make it easy for community-supported clients to be developed. It is also easy to use in shell scripts with common command line tools.

    "},{"location":"spectator/agent/usage/#command-line-configuration-flags","title":"Command Line Configuration Flags","text":"
    spectatord --help\nspectatord: A daemon that listens for metrics and reports them to Atlas.\n\n    --admin_port (Port number for the admin server.); default: 1234;\n    --age_gauge_limit (The maximum number of age gauges that may be reported by\n      this process.); default: 1000;\n    --common_tags (Common tags: nf.app=app,nf.cluster=cluster. Override the\n      default common tags. If empty, then spectatord will use the default set.\n      This flag should only be used by experts who understand the risks.);\n      default: \"\";\n    --debug (Debug spectatord. All values will be sent to a dev aggregator and\n      dropped.); default: false;\n    --enable_external (Enable external publishing.); default: false;\n    --enable_socket (Enable UNIX domain socket support. Default is true on Linux\n      and false on MacOS.); default: true;\n    --enable_statsd (Enable statsd support.); default: false;\n    --metatron_dir (Path to the Metatron certificates, which are used for\n      external publishing. A number of well-known directories are searched by\n      default. This option is only necessary if your certificates are in an\n      unusual location.); default: \"\";\n    --meter_ttl (Meter TTL: expire meters after this period of inactivity.);\n      default: 15m;\n    --no_common_tags (No common tags will be provided for metrics. 
Since no\n      common tags are available, no internal status metrics will be recorded.\n      Only use this feature for special cases where it is absolutely necessary\n      to override common tags such as nf.app, and only use it with a secondary\n      spectatord process.); default: false;\n    --port (Port number for the UDP socket.); default: 1234;\n    --socket_path (Path to the UNIX domain socket.);\n      default: \"/run/spectatord/spectatord.unix\";\n    --statsd_port (Port number for the statsd socket.); default: 8125;\n    --uri (Optional override URI for the aggregator.); default: \"\";\n    --verbose (Use verbose logging.); default: false;\n    --verbose_http (Output debug info for HTTP requests.); default: false;\n\nTry --helpfull to get a list of all flags or --help=substring shows help for\nflags which include specified substring in either in the name, or description or\npath.\n
    "},{"location":"spectator/agent/usage/#endpoints","title":"Endpoints","text":"

    By default, the daemon will listen on the following endpoints:

    • Metrics Message Protocol
    • 1234/udp (~430K reqs/sec with 16MB buffers)
    • /run/spectatord/spectatord.unix Domain Socket (~1M reqs/sec with batching)
    • Admin Server: 1234/tcp

    The choice of which endpoint to use is determined by your performance and access requirements; the Unix domain socket offers higher performance, but requires filesystem access, which may not be tenable under some container configurations. See Performance Numbers for more details.

    "},{"location":"spectator/agent/usage/#usage-examples","title":"Usage Examples","text":"

    :warning: In container environments, the -w0 option may not work and -w1 should be used instead.

    echo \"c:server.numRequests,id=failed:1\" | nc -u -w0 localhost 1234\necho \"t:server.requestLatency:0.042\" | nc -u -w0 localhost 1234\necho \"d:server.responseSizes:1024\" | nc -w0 -uU /run/spectatord/spectatord.unix\necho \"g:someGauge:60\" | nc -w0 -uU /run/spectatord/spectatord.unix\necho \"g,300:anotherGauge:60\" | nc -w0 -uU /run/spectatord/spectatord.unix\necho \"X,1543160297100:monotonic.Source:42\" | nc -w0 -uU /run/spectatord/spectatord.unix\necho \"X,1543160298100:monotonic.Source:43\" | nc -w0 -uU /run/spectatord/spectatord.unix\necho \"A:age.gauge:0\" | nc -u -w0 localhost 1234\n
    "},{"location":"spectator/agent/usage/#message-format","title":"Message Format","text":"

    The message sent to the server has the following format, where the ,options and ,tags portions are optional:

    metric-type,options:name,tags:value\n

    Multiple lines may be sent in the same packet, separated by newlines (\\n):

    echo -e \"t:server.requestLatency:0.042\\nd:server.responseSizes:1024\" | nc -u -w0 localhost 1234\n
    "},{"location":"spectator/agent/usage/#metric-types","title":"Metric Types","text":"Metric Type Symbol Description Age Gauge A The value is the time in seconds since the epoch at which an event has successfully occurred, or 0 to use the current time in epoch seconds. After an Age Gauge has been set, it will continue reporting the number of seconds since the last time recorded, for as long as the spectatord process runs. The purpose of this metric type is to enable users to more easily implement the Time Since Last Success alerting pattern. To set a specific time as the last success: A:time.sinceLastSuccess:1611081000. To set now() as the last success: A:time.sinceLastSuccess:0. By default, a maximum of 1000 Age Gauges are allowed per spectatord process, because there is no mechanism for cleaning them up. This value may be tuned with the --age_gauge_limit flag on the spectatord binary. Counter c The value is the number of increments that have occurred since the last time it was recorded. The value will be reported to the backend as a rate-per-second. Distribution Summary d The value tracks the distribution of events. It is similar to a Timer, but more general, because the size does not have to be a period of time. For example, it can be used to measure the payload sizes of requests hitting a server or the number of records returned from a query. Gauge g The value is a number that was sampled at a point in time. The default time-to-live (TTL) for gauges is 900 seconds (15 minutes) - they will continue reporting the last value set for this duration of time. Optionally, the TTL may be specified in seconds, with a minimum TTL of 5 seconds. For example, g,120:gauge:42.0 spcifies a gauge with a 120 second (2 minute) TTL. Max Gauge m The value is a number that was sampled at a point in time, but it is reported as a maximum gauge value to the backend. Monotonic Counter (double) C The value is a monotonically increasing number. 
A minimum of two samples must be received in order for spectatord to calculate a delta value and report it to the backend as a rate-per-second. The value is a double data type, and negative deltas are ignored. This data type provides flexibility for transforming values into base units with division. Commonly used with networking metrics. Monotonic Counter (uint64) U The value is a monotonically increasing number. A minimum of two samples must be received in order for spectatord to calculate a delta value and report it to the backend as a rate-per-second. The value is a uint64 data type, and it will handle rollovers. Commonly used with networking metrics. Monotonic Counter (double) with Millisecond Timestamps X The value is a monotonically increasing number, sampled at a specified number of milliseconds since the epoch. A minimum of two samples must be received in order for spectatord to calculate a delta value and report it to the backend. The value should be a uint64 data type, and it will handle rollovers. This is an experimental metric type that can be used to track monotonic sources that were sampled in the recent past, with the value normalized over the reported time period. The timestamp in milliseconds since the epoch when the value was sampled must be included as a metric option: X,1543160297100:monotonic.Source:42 Percentile Distribution Summary D The value tracks the distribution of events, with percentile estimates. It is similar to a Percentile Timer, but more general, because the size does not have to be a period of time. For example, it can be used to measure the payload sizes of requests hitting a server or the number of records returned from a query. In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Distribution Summary. Be diligent about any additional dimensions added to Percentile Distribution Summaries and ensure that they have a small bounded cardinality. 
Percentile Timer T The value is the number of seconds that have elapsed for an event, with percentile estimates. This metric type will track the data distribution by maintaining a set of Counters. The distribution can then be used on the server side to estimate percentiles, while still allowing for arbitrary slicing and dicing based on dimensions. In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Timer. Be diligent about any additional dimensions added to Percentile Timers and ensure that they have a small bounded cardinality. Timer t The value is the number of seconds that have elapsed for an event.

    The data type for all numbers except U is double. The U values are recorded as uint64_t, and the calculated deltas are passed to the backend as double. Passing negative values for uint64_t data types will cause the parsed string value to rollover.

    "},{"location":"spectator/agent/usage/#metric-name-and-tags","title":"Metric Name and Tags","text":"

    The metric name and tags must follow Atlas restrictions, which are described in the sections below.

    Tags are optional. They may be specified as comma-separated key=value pairs after the metric name. For example:

    fooIsTheName,some.tag=val1,some.otherTag=val2\n

    See Atlas Naming Conventions for recommendations on naming metrics.

    "},{"location":"spectator/agent/usage/#length-restrictions","title":"Length Restrictions","text":"Limit Min Max Length of name 1 255 Tag key length 2 60 Tag value length 1 120"},{"location":"spectator/agent/usage/#allowed-characters","title":"Allowed Characters","text":"

    The metric name, tag keys and values may only use characters in the following set: -._A-Za-z0-9.

    All other characters will be converted to an underscore (_) by the client.

    To avoid issues with parsing metrics, avoid using the SpectatorD protocol delimiter characters (,=:) rather than relying on the client to rewrite them to _.

    "},{"location":"spectator/agent/usage/#metric-value","title":"Metric Value","text":"

    A double value, or a uint64 value for one kind of Monotonic Counters. The meaning of the value depends on the metric type.

    "},{"location":"spectator/agent/usage/#metrics","title":"Metrics","text":"

    See Metrics for a list of metrics published by this service.

    "},{"location":"spectator/agent/usage/#admin-server","title":"Admin Server","text":"

    An administrative server is provided with SpectatorD, so that debugging information and a few data management tasks may be completed. By default, this server listens on port 1234/TCP, but this can be modified with the --admin_port flag. The endpoints which change data may only be accessed from localhost.

    • GET /
      • Returns a service description and list of available endpoints.
    • GET /config
      • Returns the current SpectatorD configuration, including the current set of common tags.
    • GET /config/common_tags
      • Returns a description of how to use this endpoint to modify common tags.
    • POST /config/common_tags
      • Create, modify or delete common tags from the allowed set of Mantis common tags. No other common tags may be modified. Create or update a tag by setting it to a string. Delete a tag by setting the value to an empty string.
      • Allowed tags:
        • mantisJobId
        • mantisJobName
        • mantisUser
        • mantisWorkerIndex
        • mantisWorkerNumber
        • mantisWorkerStageNumber
      • Example:
        curl -X POST \\\n-d '{\"mantisJobId\": \"foo\", \"mantisJobName\": \"bar\", \"mantisUser\": \"\"}' \\\n-w \" %{http_code}\\n\" \\\nhttp://localhost:1234/config/common_tags\n
    • GET /metrics
      • Return an object containing lists of all metrics currently known to the Registry, grouped by type.
    • DELETE /metrics/A
      • Delete all AgeGauge metrics from the Registry.
    • DELETE /metrics/A/{id}
      • Delete one AgeGauge metric from the Registry, identified by the id.
      • Example:
        curl -X DELETE \\\n-w \" %{http_code}\\n\" \\\nhttp://localhost:1234/metrics/A/fooIsTheName,some.tag=val1,some.otherTag=val2\n
    • DELETE /metrics/g
      • Delete all Gauge metrics from the Registry.
    • DELETE /metrics/g/{id}
      • Delete one Gauge metric from the Registry, identified by the id.
      • Example:
        curl -X DELETE \\\n-w \" %{http_code}\\n\" \\\nhttp://localhost:1234/metrics/g/fooIsTheName,some.tag=val1,some.otherTag=val2\n
    "},{"location":"spectator/agent/usage/#performance-numbers","title":"Performance Numbers","text":"

    A key goal of this project is to deliver high performance. This means that we need to use few resources for the common use case, where the number of metric updates is relatively small (< 10k reqs/sec), and it also needs to be able to handle hundreds of thousands of updates per second when required.

    Using Unix domain sockets, we can handle close to 1M metric updates per second, assuming the client batches the updates and sends a few at a time. Sending every single metric update requires a lot of context switching, but is something that works well for the majority of our use cases. This simplicity means the user does not have to maintain any local state.

    Transport          Batch Size    First 10M          Second 10M\nUnix Dgram         1             22.98s (435k rps)  20.58s (486k rps)\nUnix Dgram         8             11.46s (873k rps)   9.89s (1011k rps)\nUnix Dgram         32            10.38s (963k rps)   8.49s (1178k rps)\n

    The UDP transport is particularly sensitive to the max receive buffer size (16MB on our systems).

    Our tests indicate that sending 430K rps to the UDP port did not drop packets, but if there is a need for higher throughput, then tweaking /proc/sys/net/unix/max_dgram_qlen is recommended.

    "},{"location":"spectator/core/clock/","title":"Clock","text":"

    When taking measurements or working with timers it is recommended to use the Clock interface. It provides two methods for measuring time:

    "},{"location":"spectator/core/clock/#wall-time","title":"Wall Time","text":"

    This is what most users think of for time. It can be used to get the current time like what you would see on a wall clock. In most cases when not running in tests this will call System.currentTimeMillis().

    Note that the values returned by this method may not be monotonically increasing. Just like a clock on your wall, this value can go back in time or jump forward at unpredictable intervals, if someone sets the time. On many systems, ntpd or similar daemons will be constantly keeping the time synced up with an authoritative source.

    With Spectator, the Clock is typically accessed via the Registry.

    Java usage example:

    // Current time in milliseconds since the epoch\nlong currentTime = registry.clock().wallTime();\n
    "},{"location":"spectator/core/clock/#monotonic-time","title":"Monotonic Time","text":"

    While it is good in general for the wall clock to show the correct time, the unpredictable changes mean it is not a good choice for measuring how long an operation took. Consider a simple example of measuring request latency on a server:

    long start = registry.clock().wallTime();\nhandleRequest(request, response);\nlong end = registry.clock().wallTime();\nreqLatencyTimer.record(end - start, TimeUnit.MILLISECONDS);\n

    If ntp fixes the server time between start and end, then the recorded latency will be wrong. Spectator will protect against obviously wrong measurements like negative latencies by dropping those values when they are recorded. However, the change could incorrectly shorten or lengthen the measured latency.

    The clock interface also provides access to a monotonic source that is only useful for measuring elapsed time, for example:

    long start = registry.clock().monotonicTime();\nhandleRequest(request, response);\nlong end = registry.clock().monotonicTime();\nreqLatencyTimer.record(end - start, TimeUnit.NANOSECONDS);\n

    In most cases this will map to System.nanoTime(). Note the actual value returned is not meaningful unless compared with another sample to get a delta.

    "},{"location":"spectator/core/clock/#manual-clock","title":"Manual Clock","text":"

    If timing code is written to the Clock interface, then alternative implementations can be plugged-in. For test cases, it is common to use ManualClock so that tests can be reliable and fast without having to rely on hacks like sleep or assuming something will run in less than a certain amount of time.

    ManualClock clock = new ManualClock();\nRegistry registry = new DefaultRegistry(clock);\n\nTimer timer = registry.timer(\"test\");\ntimer.record(() -> {\n  doSomething();\n  clock.setMonotonicTime(42L);\n});\n\nAssert.assertEquals(timer.totalTime(), 42L);\n
    "},{"location":"spectator/core/meters/counter/","title":"Counter","text":"

    A Counter is used to measure the rate at which some event is occurring. Considering a simple queue, Counters could be used to measure things like the rate at which items are being inserted and removed.

    Counters are reported to the backend as a rate-per-second. This makes it much easier to reason about the measurement and allows for aggregating the counter across instances.

    In Atlas, the :per-step operator can be used to convert them back into a count-per-step on a graph.

    Note

    For high performance code, such as incrementing in a tight loop that lasts less than a reporting interval, increment a local variable and add the final value to the counter after the loop has completed.

    "},{"location":"spectator/core/meters/counter/#languages","title":"Languages","text":""},{"location":"spectator/core/meters/counter/#first-class-support","title":"First-Class Support","text":"
    • C++
    • Go
    • Java
    • Node.js
    • Python
    "},{"location":"spectator/core/meters/counter/#best-effort-support","title":"Best-Effort Support","text":"
    • Rust (internal library)
    "},{"location":"spectator/core/meters/dist-summary/","title":"Distribution Summary","text":"

    A Distribution Summary is used to track the distribution of events. It is similar to a Timer, but more general, in that the size does not have to be a period of time. For example, a distribution summary could be used to measure the payload sizes of requests hitting a server or the number of records returned from a query.

    It is recommended to always use base units when recording the data. So, if measuring the payload size use bytes, not kilobytes or some other unit. This allows the presentation layer for graphing to use either SI or IEC prefixes in a natural manner, and you do not need to consider the meaning of something like \"milli-milliseconds\".

    "},{"location":"spectator/core/meters/dist-summary/#querying","title":"Querying","text":"

    Note

    Distribution summaries report summarized statistics about the measurements for a time window including the totalAmount, count, max and totalOfSquares. If you were to simply query for the name of your distribution summary via

    nf.cluster,foo,:eq,\nname,http.req.payload.size,:eq,\n:and\n

    you would get a nonsense value that is the sum of the reported statistics.

    When querying the results of a distribution summary, either select one of the statistics above via a filter, or use one of the operators below to generate a useful response.

    "},{"location":"spectator/core/meters/dist-summary/#average-measurement-dist-avg","title":"Average Measurement (:dist-avg)","text":"

    To compute the average latency across an arbitrary group, use the :dist-avg function:

    nf.cluster,foo,:eq,\nname,http.req.payload.size,:eq,\n:and,\n:dist-avg,\n(,nf.asg,),:by\n
    "},{"location":"spectator/core/meters/dist-summary/#maximum-measurement-dist-max","title":"Maximum Measurement (:dist-max)","text":"

    To compute the maximum latency across a group, use :dist-max:

    nf.cluster,foo,:eq,\nname,http.req.payload.size,:eq,\n:and,\n:dist-max,\n(,nf.asg,),:by\n
    "},{"location":"spectator/core/meters/dist-summary/#standard-deviation-of-measurement-dist-stddev","title":"Standard Deviation of Measurement (:dist-stddev)","text":"

    To compute the standard deviation of measurements across all instances for a time interval:

    nf.cluster,foo,:eq,\nname,http.req.payload.size,:eq,\n:and,\n:dist-stddev\n
    "},{"location":"spectator/core/meters/dist-summary/#raw-statistics","title":"Raw Statistics","text":"

    Note that it is possible to plot the individual statistics by filtering on the statistic tag. If you choose to do so, note that the count, totalAmount and totalOfSquares are counters thus reported as rates per second, while the max is reported as a gauge.

    "},{"location":"spectator/core/meters/dist-summary/#languages","title":"Languages","text":""},{"location":"spectator/core/meters/dist-summary/#first-class-support","title":"First-Class Support","text":"
    • C++
    • Go
    • Java
    • Node.js
    • Python
    "},{"location":"spectator/core/meters/dist-summary/#best-effort-support","title":"Best-Effort Support","text":"
    • Rust (internal library)
    "},{"location":"spectator/core/meters/gauge/","title":"Gauge","text":"

    A Gauge is a value that is sampled at some point in time. Typical examples for Gauges would be the size of a queue, or the number of threads in a running state. Since Gauges are not updated inline when a state change occurs, there is no information about what might have occurred between samples.

    Consider monitoring the behavior of a queue of tasks. If the data is being collected once a minute, then a Gauge for the size will show the size when it was sampled (a.k.a. last-write-wins). The size may have been much higher or lower at some point during the interval, but that is not known.

    "},{"location":"spectator/core/meters/gauge/#languages","title":"Languages","text":""},{"location":"spectator/core/meters/gauge/#first-class-support","title":"First-Class Support","text":"
    • C++
    • Go
    • Java
    • Node.js
    • Python
    "},{"location":"spectator/core/meters/gauge/#best-effort-support","title":"Best-Effort Support","text":"
    • Rust (internal library)
    "},{"location":"spectator/core/meters/timer/","title":"Timer","text":"

    A Timer is used to measure how long (in seconds) some event is taking. Timer measurements are typically short, less than 1 minute.

    A selection of specialized timers include:

    • LongTaskTimer - Periodically reports the time taken for a long running task (> 1 minute). See the Long Task Timer pattern for details.
    • PercentileTimer - Useful if percentile approximations are needed in addition to basic stats. See the Percentile Timer pattern for details.
    "},{"location":"spectator/core/meters/timer/#querying","title":"Querying","text":"

    Note

    Timers report summarized statistics about the measurements for a time window including the totalTime, count, max and totalOfSquares. If you were to simply query for the name of your timer via

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and\n

    you would get a nonsense value that is the sum of the reported statistics.

    When querying the results of a timer, use one of the operators below to generate a useful response.

    "},{"location":"spectator/core/meters/timer/#average-measurement-dist-avg","title":"Average Measurement (:dist-avg)","text":"

    To compute the average latency across an arbitrary group, use the :dist-avg function:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-avg,\n(,nf.asg,),:by\n
    "},{"location":"spectator/core/meters/timer/#maximum-measurement-dist-max","title":"Maximum Measurement (:dist-max)","text":"

    To compute the maximum latency across a group, use :dist-max:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-max,\n(,nf.asg,),:by\n
    "},{"location":"spectator/core/meters/timer/#standard-deviation-of-measurement-dist-stddev","title":"Standard Deviation of Measurement (:dist-stddev)","text":"

    To compute the standard deviation of measurements across all instances for a time interval:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-stddev\n
    "},{"location":"spectator/core/meters/timer/#raw-statistics","title":"Raw Statistics","text":"

    Note that it is possible to plot the individual statistics by filtering on the statistic tag. If you choose to do so, note that the count, totalTime and totalOfSquares are counters thus reported as rates per second, while the max is reported as a gauge.

    "},{"location":"spectator/core/meters/timer/#languages","title":"Languages","text":""},{"location":"spectator/core/meters/timer/#first-class-support","title":"First-Class Support","text":"
    • C++
    • Go
    • Java
    • Node.js
    • Python
    "},{"location":"spectator/core/meters/timer/#best-effort-support","title":"Best-Effort Support","text":"
    • Rust (internal library)
    "},{"location":"spectator/lang/overview/","title":"Overview","text":"

    The original Spectator library was written in Java, with the first stable version (0.35.0) released on Jan 18, 2016. Since then, there has been a proliferation of languages at Netflix which seek first-class observability support.

    After some thought and experimentation, we have settled on a strategy of developing minimal Spectator implementations in many languages, which function as thin clients that send data to Atlas. Our goal is to have partners invested in each experimental language who will provide the necessary expertise to develop idiomatic solutions, deliver real-world feedback on library usage, and shoulder some of the support and maintenance burden.

    We think this is a more sustainable path over the long-term than expanding our team to support N different languages for this singular polyglot use case.

    "},{"location":"spectator/lang/overview/#first-class-support","title":"First-Class Support","text":"

    These libraries are fully-supported by the team and see wide use across Netflix. Issues are fixed in a timely manner and updates are published regularly.

    • C++
    • Go
    • Java
    • Node.js
    • Python
    "},{"location":"spectator/lang/overview/#best-effort-support","title":"Best-Effort Support","text":"
    • Rust (internal library)
    "},{"location":"spectator/lang/cpp/usage/","title":"spectator-cpp Usage","text":"

    C++ thin-client metrics library for use with Atlas and SpectatorD.

    "},{"location":"spectator/lang/cpp/usage/#instrumenting-code","title":"Instrumenting Code","text":"
    #include <spectator/registry.h>\n\n// use default values\nstatic constexpr auto kDefault = 0;\n\nstruct Request {\n  std::string country;\n};\n\nstruct Response {\n  int status;\n  int size;\n};\n\nclass Server {\n public:\n  explicit Server(spectator::Registry* registry)\n      : registry_{registry},\n        request_count_id_{registry->CreateId(\"server.requestCount\", spectator::Tags{})},\n        request_latency_{registry->GetTimer(\"server.requestLatency\")},\n        response_size_{registry->GetDistributionSummary(\"server.responseSizes\")} {}\n\n  Response Handle(const Request& request) {\n    auto start = std::chrono::steady_clock::now();\n\n    // do some work and obtain a response...\n    Response res{200, 64};\n\n    // Update the Counter id with dimensions, based on information in the request. The Counter\n    // will be looked up in the Registry, which is a fairly cheap operation, about the same as\n    // the lookup of an id object in a map. However, it is more expensive than having a local\n    // variable set to the Counter.\n    auto cnt_id = request_count_id_\n        ->WithTag(\"country\", request.country)\n        ->WithTag(\"status\", std::to_string(res.status));\n    registry_->GetCounter(std::move(cnt_id))->Increment();\n    request_latency_->Record(std::chrono::steady_clock::now() - start);\n    response_size_->Record(res.size);\n    return res;\n  }\n\n private:\n  spectator::Registry* registry_;\n  std::shared_ptr<spectator::Id> request_count_id_;\n  std::shared_ptr<spectator::Timer> request_latency_;\n  std::shared_ptr<spectator::DistributionSummary> response_size_;\n};\n\nRequest get_next_request() {\n  return Request{\"US\"};\n}\n\nint main() {\n  auto logger = spdlog::stdout_color_mt(\"console\"); \n  std::unordered_map<std::string, std::string> common_tags{{\"xatlas.process\", \"some-sidecar\"}};\n  spectator::Config cfg{\"unix:/run/spectatord/spectatord.unix\", common_tags};\n  spectator::Registry registry{std::move(cfg), logger};\n\n  
Server server{&registry};\n\n  for (auto i = 1; i <= 3; ++i) {\n    // get a request\n    auto req = get_next_request();\n    server.Handle(req);\n  }\n}\n
    "},{"location":"spectator/lang/cpp/usage/#high-volume-publishing","title":"High-Volume Publishing","text":"

    By default, the library sends every meter change to the spectatord sidecar immediately. This involves a blocking send call and underlying system calls, and may not be the most efficient way to publish metrics in high-volume use cases. For this purpose a simple buffering functionality in Publisher is implemented, and it can be turned on by passing a buffer size to the spectator::Config constructor. It is important to note that, until this buffer fills up, the Publisher will not send any meters to the sidecar. Therefore, if your application doesn't emit meters at a high rate, you should either keep the buffer very small, or do not configure a buffer size at all, which will fall back to the \"publish immediately\" mode of operation.

    "},{"location":"spectator/lang/go/migrations/","title":"Migrations","text":""},{"location":"spectator/lang/go/migrations/#migrating-from-0x-to-2x","title":"Migrating from 0.X to 2.X","text":"

    Version 2.X consists of a major rewrite that turns spectator-go into a thin client designed to send metrics through spectatord. As a result some functionality has been moved to other packages or removed.

    "},{"location":"spectator/lang/go/migrations/#new","title":"New","text":""},{"location":"spectator/lang/go/migrations/#writers","title":"Writers","text":"

    spectator.Registry now supports different writers. The default writer is writer.UdpWriter which sends metrics to spectatord through UDP.

    Writers can be configured through spectator.Config.Location.

    Possible values are:

    • none: Configures a no-op writer that does nothing. Can be used to disable metrics collection.
    • stdout: Writes metrics to stdout.
    • stderr: Writes metrics to stderr.
    • memory: Writes metrics to memory. Useful for testing.
    • file:///path/to/file: Writes metrics to a file.
    • unix:///path/to/socket: Writes metrics to a Unix domain socket.
    • udp://host:port: Writes metrics to a UDP socket.

    Location can also be set through the environment variable SPECTATOR_OUTPUT_LOCATION. If both are set, the environment variable takes precedence over the passed config.

    The environment variable SPECTATOR_OUTPUT_LOCATION can be set to none to disable metrics collection.

    "},{"location":"spectator/lang/go/migrations/#meters","title":"Meters","text":"

    The following new Meters have been added:

    • meter.MaxGauge
    • meter.Gauge with TTL
    "},{"location":"spectator/lang/go/migrations/#common-tags","title":"Common Tags","text":"

    Common tags are now automatically added to all Meters. Their values are read from the environment variables.

    Tag Environment Variable nf.container TITUS_CONTAINER_NAME nf.process NETFLIX_PROCESS_NAME

    Tags from environment variables take precedence over tags passed on code when creating the Config.

    Note that common tags sourced by spectatord can't be overwritten.

    "},{"location":"spectator/lang/go/migrations/#config","title":"Config","text":"
    • Config is now created through a constructor which throws error if the passed in parameters are not valid.
    • Config members are now private.
    "},{"location":"spectator/lang/go/migrations/#moved","title":"Moved","text":"
    • Runtime metrics collection has been moved to spectator-go-runtime-metrics. Follow instructions in the README to enable collection.
    • Some types have been moved to different packages. For example, spectator.Counter is now in meter.Counter.
    "},{"location":"spectator/lang/go/migrations/#removed","title":"Removed","text":"
    • spectator.HttpClient has been removed. Use the standard http.Client instead.
    • spectator.Meters no longer has a Measure() []Measurement function. Meters are now stateless and do not store measurements.
    • spectator.Clock has been removed. Use the standard time package instead.
    • spectator.Config has been greatly simplified.
    • spectator.Registry no longer has a Start() function. The Registry is now effectively stateless and there is nothing to start other than opening the output location.
    • spectator.Registry no longer has a Stop() function. Instead, use Close() to close the registry. Once the registry is closed, it can't be started again.
    • spectator.Config.IpcTimerRecord has been removed. Use a meter.Timer instead to record Ipc metrics.
    • spectator.MeterFactoryFun has been removed. If you need to create a custom meter you can do so by wrapping one of the meters returned by spectator.Registry.
    • spectator.Registry no longer reports spectator.measurements metrics. Instead, you can use spectatord metrics to troubleshoot.
    • spectator.Registry no longer keeps track of the Meters it creates. This means that you can't get a list of all Meters from the Registry. If you need to keep track of Meters, you can do so in your application code.
    • Percentile* meters no longer support defining min/max values.
    • spectator.Registry no longer allows setting a different logger after creation. A custom logger can be set in the spectator.Config before creating the Registry.
    • File-based configuration is no longer supported.
    "},{"location":"spectator/lang/go/migrations/#migration-steps","title":"Migration Steps","text":"
    1. Make sure you're not relying on any of the removed functionality.
    2. Update imports to use meters package instead of spectator for Meters.
    3. If you want to collect runtime metrics pull spectator-go-runtime-metrics and follow the instructions in the README.
    4. If you use PercentileDistributionSummary or PercentileTimer, then you need to update your code to use the respective functions provided by the Registry to initialize these meters.
    5. Remove dependency on Spectator Go Internal configuration library. Such dependency is no longer required.
    6. There is no longer an option to start or stop the registry at runtime. If you need to configure a Registry that doesn't emit metrics, for testing purposes, you can use the spectator.Config.Location option with none to configure a no-op writer.
    "},{"location":"spectator/lang/go/migrations/#writing-tests","title":"Writing Tests","text":"

    To write tests against this library, instantiate a test instance of the Registry and configure it to use the MemoryWriter, which stores all updates in an Array. Maintain a handle to the MemoryWriter, then inspect the Lines() to verify your metrics updates. See the source code for more testing examples.

    package app\n\nimport (\n    \"fmt\"\n    \"github.com/Netflix/spectator-go/v2/spectator/logger\"\n    \"github.com/Netflix/spectator-go/v2/spectator/writer\"\n    \"testing\"\n    \"time\"\n)\n\nfunc TestRegistryWithMemoryWriter_Counter(t *testing.T) {\n    mw := &writer.MemoryWriter{}\n    r := NewTestRegistry(mw)\n\n    counter := r.Counter(\"test_counter\", nil)\n    counter.Increment()\n    expected := \"c:test_counter:1\"\n    if len(mw.Lines()) != 1 || mw.Lines()[0] != expected {\n        t.Errorf(\"Expected '%s', got '%s'\", expected, mw.Lines()[0])\n    }\n}\n\nfunc NewTestRegistry(mw *writer.MemoryWriter) Registry {\n    return &spectatordRegistry{\n        config: &Config{},\n        writer: mw,\n        logger: logger.NewDefaultLogger(),\n    }\n}\n
    "},{"location":"spectator/lang/go/usage/","title":"spectator-go Usage","text":"

    Go thin-client metrics library for use with Atlas and SpectatorD.

    "},{"location":"spectator/lang/go/usage/#supported-go-versions","title":"Supported Go Versions","text":"

    This library currently targets the latest two stable versions of Go.

    There is one language feature used in the project which requires at least 1.21 - the log/slog structured logging library.

    "},{"location":"spectator/lang/go/usage/#instrumenting-code","title":"Instrumenting Code","text":"
    package main\n\nimport (\n    \"github.com/Netflix/spectator-go/v2/spectator\"\n    \"github.com/Netflix/spectator-go/v2/spectator/meter\"\n    \"strconv\"\n    \"time\"\n)\n\ntype Server struct {\n    registry       spectator.Registry\n    requestCountId *meter.Id\n    requestLatency *meter.Timer\n    responseSizes  *meter.DistributionSummary\n}\n\ntype Request struct {\n    country string\n}\n\ntype Response struct {\n    status int\n    size   int64\n}\n\nfunc (s *Server) Handle(request *Request) (res *Response) {\n    start := time.Now()\n\n    // initialize response\n    res = &Response{200, 64}\n\n    // Update the counter with dimensions based on the request.\n    tags := map[string]string{\n        \"country\": request.country,\n        \"status\":  strconv.Itoa(res.status),\n    }\n    requestCounterWithTags := s.requestCountId.WithTags(tags)\n    counter := s.registry.CounterWithId(requestCounterWithTags)\n    counter.Increment()\n\n    // ...\n    s.requestLatency.Record(time.Since(start))\n    s.responseSizes.Record(res.size)\n    return\n}\n\nfunc newServer(registry spectator.Registry) *Server {\n    return &Server{\n        registry,\n        registry.NewId(\"server.requestCount\", nil),\n        registry.Timer(\"server.requestLatency\", nil),\n        registry.DistributionSummary(\"server.responseSizes\", nil),\n    }\n}\n\nfunc getNextRequest() *Request {\n    // ...\n    return &Request{\"US\"}\n}\n\nfunc main() {\n    commonTags := map[string]string{\"nf.platform\": \"my_platform\", \"process_name\": \"my_process\"}\n    // if desired, replace the logger with a custom one, using the third parameter here:\n    config, _ := spectator.NewConfig(\"\", commonTags, nil)\n\n    registry, _ := spectator.NewRegistry(config)\n    defer registry.Close()\n\n    server := newServer(registry)\n\n    for i := 1; i < 3; i++ {\n        // get a request\n        req := getNextRequest()\n        server.Handle(req)\n    }\n}\n
    "},{"location":"spectator/lang/go/usage/#logging","title":"Logging","text":"

    Logging is implemented with the standard Golang slog package. The logger defines interfaces for Debugf, Infof, and Errorf. There are useful messages implemented at the Debug level which can help diagnose the metric publishing workflow. The logger can be overridden by providing one as the third parameter of the Config constructor.

    "},{"location":"spectator/lang/go/usage/#runtime-metrics","title":"Runtime Metrics","text":"

    Use spectator-go-runtime-metrics. Follow instructions in the README to enable collection.

    "},{"location":"spectator/lang/java/servo-migration/","title":"Servo Migration","text":""},{"location":"spectator/lang/java/servo-migration/#servo-comparison","title":"Servo Comparison","text":"

    Servo is an alternative client monitoring library that is also developed by Netflix. Originally, Spectator was an experiment for a simpler API that wrapped Servo. It was done as a separate project to avoid breaking backwards compatibility for Servo.

    From a user perspective, both will be supported for a long time, but most of our efforts for future improvement will go to Spectator. For new code, it is recommended to use the spectator API. If running at Netflix, the correct bindings will be in place for both Servo and Spectator.

    "},{"location":"spectator/lang/java/servo-migration/#differences","title":"Differences","text":"

    This section provides a quick summary of the differences between Spectator and Servo.

    "},{"location":"spectator/lang/java/servo-migration/#simpler-api","title":"Simpler API","text":"

    Servo gives the user a lot of control, but this makes it hard to use correctly. For example, to create a Counter, the user needs to understand the trade-offs and choose between:

    • BasicCounter
    • DynamicCounter
    • ContextualCounter
    • StepCounter

    Further, each of these can impact how data is reported to observers. The Spectator API focuses on the constructs a user needs to instrument the code. In Spectator, the user would always use the Registry to create a Counter. The implementation details are left up to the Registry.

    The registration is simpler as well to avoid common pitfalls when using Servo like overwriting a registered object.

    "},{"location":"spectator/lang/java/servo-migration/#more-focused","title":"More Focused","text":"

    The goal of Spectator is instrumenting code to send to a dimensional time-series system like Atlas. Servo has goals of staying compatible with a number of legacy libraries and naming formats, exposing data to JMX, etc. Examples of how this influences decisions:

    • No support for non-numeric data. Servo supported this feature, so that it can expose data to JMX. Exposing the numeric data registered in Spectator to JMX can be done using a registry that supports it, but there is no goal to be a general interface for exposing arbitrary data in JMX.
    • No support for custom time units when reporting timer data. Base units should always be used for reporting and conversions can be performed in the presentation layer, if needed. It also avoids a lot of the confusion around the timer unit for the data and issues like creating aggregates that are meaningless due to mixed units.

    It is better to have a simple way to send correct and easy-to-understand data to the backend than many options. If you want more knobs, then you can use Servo.

    "},{"location":"spectator/lang/java/servo-migration/#di-friendly","title":"DI Friendly","text":"

    When Servo was originally written, dependency injection (DI) was not heavily used at Netflix. Further, Servo needed to stay compatible with a number of use-cases that were heavily static.

    While Spectator does have a static registry that can be used, the recommended way is to create a registry and inject it either manually or via a framework into the classes that need it. This also makes it much easier to test in isolation.

    "},{"location":"spectator/lang/java/servo-migration/#migration","title":"Migration","text":"

    If you want to migrate from the Servo API to the Spectator API, then this section provides some guides on how Servo constructs can be ported over. The sub-sections are the class names of monitor types supported by Servo.

    For users at Netflix, we are not actively pushing teams to migrate or do any additional work. Servo is still supported and if it works for your use-case, then feel free to continue using it.

    "},{"location":"spectator/lang/java/servo-migration/#registration","title":"Registration","text":"

    First read through the Servo docs on registration. With Servo, say you have a class like the following:

    public class Foo {\n\n  private AtomicInteger gauge;\n  private Counter counter;\n\n  public Foo(String id) {\n    gauge = new AtomicInteger();\n    counter = new BasicCounter(MonitorConfig.builder(\"counter\").build());\n    Monitors.registerObject(id, this);\n  }\n\n  @Monitor(name = \"gauge\", type = DataSourceType.GAUGE)\n  private int gauge() {\n    return gauge.get();\n  }\n\n  public void doSomething() {\n    ...\n  }\n}\n

    The state of the class is in the member variables of an instance of Foo. If multiple instances of class Foo are created with the same value for id, then the last one will overwrite the others for the registration. So the values getting reported will only be from the last instance registered. Also the registry has a reference to the instance of Foo, so it will never go away.

    For Counters and Timers, one way to get around this is to use DynamicCounter and DynamicTimer, respectively. Those classes will automatically handle the registration and expire if there is no activity. They also get used for cases where the set of dimensions is not known up front.

    Gauges need to sample the state of something, so they need to have a reference to an object that contains the state. So the user would need to ensure that only a single copy was registered leading to patterns like:

    class Foo {\n\n  private static class FooStats {\n\n    private AtomicInteger gauge;\n    private Counter counter;\n\n    public FooStats(String id) {\n      gauge = new AtomicInteger();\n      counter = new BasicCounter(MonitorConfig.builder(\"counter\").build());\n      Monitors.registerObject(id, this);\n    }\n\n    @Monitor(name = \"gauge\", type = DataSourceType.GAUGE)\n    private int gauge() {\n      return gauge.get();\n    }\n  }\n\n  private static ConcurrentHashMap<String, FooStats> STATS =\n    new ConcurrentHashMap<>();\n\n  private final FooStats stats;\n\n  public Foo(String id) {\n    stats = STATS.computeIfAbsent(id, (i) -> new FooStats(i));\n  }\n\n  public void doSomething() {\n    ...\n    stats.update();\n  }\n}\n

    This ensures that there is a single copy for a given id. In spectator this example would look like:

    public class Foo {\n\n  private AtomicInteger gauge;\n  private Counter counter;\n\n  public Foo(Registry registry, String id) {\n    Id gaugeId = registry.createId(\"gauge\").withTag(\"id\", id);\n    gauge = registry.gauge(gaugeId, new AtomicInteger());\n    counter = registry.counter(\"counter\", \"id\", id);\n  }\n\n  public void doSomething() {\n    ...\n  }\n}\n

    Everything using the same Registry will get the same Counter instance, if the same id is used. For the Gauge, the Registry will keep a weak reference and will sum the values if multiple instances are present. Since it is a weak reference, nothing will prevent an instance of Foo from getting garbage collected.

    "},{"location":"spectator/lang/java/servo-migration/#annotations","title":"Annotations","text":"

    Annotations are not supported, use the appropriate meter type:

    DataSourceType Spectator Alternative COUNTER Counter Usage GAUGE Gauge Usage INFORMATIONAL Not supported"},{"location":"spectator/lang/java/servo-migration/#basiccounter","title":"BasicCounter","text":"

    See the general overview of registration differences and summary of Counter usage.

    Servo:

    public class Foo {\n  private final Counter c =\n    new BasicCounter(MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n\n  public void doSomething() {\n    c.increment();\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Counter c;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    c = registry.counter(\"name\", \"id\", id);\n  }\n\n  public void doSomething() {\n    c.increment();\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#basicgauge","title":"BasicGauge","text":"

    See the general overview of registration differences and summary of Gauge usage.

    Servo:

    public class Foo {\n  private final BasicGauge g = new BasicGauge(\n    MonitorConfig.builder(\"name\").build(),\n    this::getCurrentValue);\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n}\n

    Spectator:

    public class Foo {\n  @Inject\n  public Foo(Registry registry, String id) {\n    Id gaugeId = registry.createId(\"name\").withTag(\"id\", id);\n    registry.gauge(gaugeId, this, Foo::getCurrentValue);\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#basictimer","title":"BasicTimer","text":"

    See the general overview of registration differences and summary of Timer usage. In Spectator, the reported unit for Timers is always seconds and cannot be changed. Seconds is the base unit and other units should only be used as a presentation detail. Servo allows the unit to be customized and defaults to milliseconds.

    Servo:

    public class Foo {\n  private final Timer t = new BasicTimer(\n    MonitorConfig.builder(\"name\").build(), TimeUnit.SECONDS);\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n\n  public void doSomething() {\n    Stopwatch s = t.start();\n    try {\n      ...\n    } finally {\n      s.stop();\n    }\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Timer t;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    t = registry.timer(\"name\", \"id\", id);\n  }\n\n  public void doSomething() {\n    t.record(() -> {\n      ...\n    });\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#basicdistributionsummary","title":"BasicDistributionSummary","text":"

    See the general overview of registration differences and summary of Distribution Summary usage.

    Servo:

    public class Foo {\n  private final BasicDistributionSummary s = new BasicDistributionSummary(\n    MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n\n  public void doSomething() {\n    ...\n    s.record(getValue());\n  }\n}\n

    Spectator:

    public class Foo {\n  private final DistributionSummary s;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    s = registry.distributionSummary(\"name\", \"id\", id);\n  }\n\n  public void doSomething() {\n    ...\n    s.record(getValue());\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#basicinformational","title":"BasicInformational","text":"

    Not supported, see the overview of differences.

    "},{"location":"spectator/lang/java/servo-migration/#basicstopwatch","title":"BasicStopwatch","text":"

    There isn't an explicit stopwatch class in Spectator. Use a timing call directly.

    Servo:

      public void doSomething() {\n    Stopwatch s = timer.start();\n    try {\n      ...\n    } finally {\n      s.stop();\n    }\n  }\n

    Spectator:

      public void doSomething() {\n    final long s = System.nanoTime();\n    try {\n      ...\n    } finally {\n      timer.record(System.nanoTime() - s, TimeUnit.NANOSECONDS);\n    }\n  }\n
    "},{"location":"spectator/lang/java/servo-migration/#buckettimer","title":"BucketTimer","text":"

    See the general overview of registration differences.

    Servo:

    public class Foo {\n  private final Timer t = new BucketTimer(\n    MonitorConfig.builder(\"name\").build(),\n    new BucketConfig.Builder()\n      .withTimeUnit(TimeUnit.MILLISECONDS)\n      .withBuckets(new long[] { 500, 2500, 5000, 10000 })\n      .build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n\n  public void doSomething() {\n    Stopwatch s = t.start();\n    try {\n      ...\n    } finally {\n      s.stop();\n    }\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Timer t;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    Id timerId = registry.createId(\"name\", \"id\", id);\n    BucketFunction f = BucketFunctions.latency(10, TimeUnit.SECONDS);\n    t = BucketTimer.get(registry, timerId, f);\n  }\n\n  public void doSomething() {\n    t.record(() -> {\n      ...\n    });\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#contextualcounter","title":"ContextualCounter","text":"

    Not supported. A fixed tag list for the context is too rigid and this class was never used much at Netflix. Future work being looked at in issue-180.

    "},{"location":"spectator/lang/java/servo-migration/#contextualtimer","title":"ContextualTimer","text":"

    Not supported. A fixed tag list for the context is too rigid and this class was never used much at Netflix. Future work being looked at in issue-180.

    "},{"location":"spectator/lang/java/servo-migration/#doublegauge","title":"DoubleGauge","text":"

    See the general overview of registration differences and summary of Gauge usage.

    Servo:

    public class Foo {\n  private final DoubleGauge g = new DoubleGauge(\n    MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n}\n

    Spectator:

    import com.google.common.util.concurrent.AtomicDouble;\n\npublic class Foo {\n  private final AtomicDouble v;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    Id gaugeId = registry.createId(\"name\").withTag(\"id\", id);\n    v = registry.gauge(gaugeId, new AtomicDouble());\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#durationtimer","title":"DurationTimer","text":"

    See the general overview of registration differences, the summary of Timer usage, and Long Task Timer usage.

    Servo:

    public class Foo {\n  private final DurationTimer t = new DurationTimer(\n    MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n}\n

    Spectator:

    public class Foo {\n  private final LongTaskTimer t;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    t = registry.longTaskTimer(\"name\", \"id\", id);\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#dynamiccounter","title":"DynamicCounter","text":"

    See the general overview of registration differences and summary of Counter usage.

    Servo:

    public class Foo {\n\n  private final String id;\n\n  public Foo(String id) {\n    this.id = id;\n  }\n\n  public void doSomething(Context ctxt) {\n    DynamicCounter.increment(\"staticId\", \"id\", id);\n    DynamicCounter.increment(\"dynamicId\", \"id\", id, \"foo\", ctxt.getFoo());\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Registry registry;\n  private final String id;\n  private final Counter staticCounter;\n  private final Id dynamicId;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    this.registry = registry;\n    this.id = id;\n    staticCounter = registry.counter(\"staticId\", \"id\", id);\n    dynamicId = registry.createId(\"dynamicId\", \"id\", id);\n  }\n\n  public void doSomething(Context ctxt) {\n    // Keeping the reference to the counter avoids additional allocations\n    // to create the id object and the lookup cost\n    staticCounter.increment();\n\n    // If the id is dynamic it must be looked up\n    registry.counter(\"dynamicId\", \"id\", id, \"foo\", ctxt.getFoo()).increment();\n\n    // This will update the same counter as the line above, but the base part\n    // of the id is precomputed to make it cheaper to construct the id.\n    registry.counter(dynamicId.withTag(\"foo\", ctxt.getFoo())).increment();\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#dynamictimer","title":"DynamicTimer","text":"

    See the general overview of registration differences and summary of Timer usage.

    Servo:

    public class Foo {\n\n  private final String id;\n  private final MonitorConfig staticId;\n\n  public Foo(String id) {\n    this.id = id;\n    staticId = MonitorConfig.builder(\"staticId\").withTag(\"id\", id).build();\n  }\n\n  public void doSomething(Context ctxt) {\n    final long d = ctxt.getDurationMillis();\n    DynamicTimer.record(staticId, TimeUnit.SECONDS, d, TimeUnit.MILLISECONDS);\n\n    MonitorConfig dynamicId = MonitorConfig.builder(\"dynamicId\")\n      .withTag(\"id\", id)\n      .withTag(\"foo\", ctxt.getFoo())\n      .build();\n    DynamicTimer.record(dynamicId, TimeUnit.SECONDS, d, TimeUnit.MILLISECONDS);\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Registry registry;\n  private final String id;\n  private final Timer staticTimer;\n  private final Id dynamicId;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    this.registry = registry;\n    this.id = id;\n    staticTimer = registry.timer(\"staticId\", \"id\", id);\n    dynamicId = registry.createId(\"dynamicId\", \"id\", id);\n  }\n\n  public void doSomething(Context ctxt) {\n    final long d = ctxt.getDurationMillis();\n\n    // Keeping the reference to the timer avoids additional allocations\n    // to create the id object and the lookup cost\n    staticTimer.record(d, TimeUnit.MILLISECONDS);\n\n    // If the id is dynamic it must be looked up\n    registry.timer(\"dynamicId\", \"id\", id, \"foo\", ctxt.getFoo())\n      .record(d, TimeUnit.MILLISECONDS);\n\n    // This will update the same timer as the line above, but the base part\n    // of the id is precomputed to make it cheaper to construct the id.\n    registry.timer(dynamicId.withTag(\"foo\", ctxt.getFoo()))\n      .record(d, TimeUnit.MILLISECONDS);\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#longgauge","title":"LongGauge","text":"

    See the general overview of registration differences and summary of Gauge usage.

    Servo:

    public class Foo {\n  private final LongGauge g = new LongGauge(\n    MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n}\n

    Spectator:

    public class Foo {\n  private final AtomicLong v;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    Id gaugeId = registry.createId(\"name\").withTag(\"id\", id);\n    v = registry.gauge(gaugeId, new AtomicLong());\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#monitorconfig","title":"MonitorConfig","text":"

    See the documentation on naming.

    Servo:

    MonitorConfig id = MonitorConfig.builder(\"name\")\n  .withTag(\"country\", \"US\")\n  .withTag(\"device\",  \"xbox\")\n  .build();\n

    Spectator:

    Id id = registry.createId(\"name\")\n  .withTag(\"country\", \"US\")\n  .withTag(\"device\",  \"xbox\");\n\n// or\n\nId id = registry.createId(\"name\", \"country\", \"US\", \"device\", \"xbox\");\n
    "},{"location":"spectator/lang/java/servo-migration/#monitoredcache","title":"MonitoredCache","text":"

    Not supported because Spectator does not have a direct dependency on Guava. If there is enough demand, an extension can be created.

    "},{"location":"spectator/lang/java/servo-migration/#numbergauge","title":"NumberGauge","text":"

    See the general overview of registration differences and summary of Gauge usage.

    Servo:

    public class Foo {\n  private final NumberGauge g = new NumberGauge(\n    MonitorConfig.builder(\"name\").build(), new AtomicLong());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n}\n

    Spectator:

    public class Foo {\n  private final AtomicLong v;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    Id gaugeId = registry.createId(\"name\").withTag(\"id\", id);\n    v = registry.gauge(gaugeId, new AtomicLong());\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#statstimer","title":"StatsTimer","text":"

    Not supported, see overview of differences.

    "},{"location":"spectator/lang/java/servo-migration/#stepcounter","title":"StepCounter","text":"

    See the general overview of registration differences and summary of Counter usage.

    Servo:

    public class Foo {\n  private final Counter c =\n    new StepCounter(MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n\n  public void doSomething() {\n    c.increment();\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Counter c;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    c = registry.counter(\"name\", \"id\", id);\n  }\n\n  public void doSomething() {\n    c.increment();\n  }\n}\n
    "},{"location":"spectator/lang/java/testing/","title":"Testing","text":"

    Testing should be relatively straightforward if you are using injection for the Registry. Consider a sample class:

    public class Foo {\n\n  private final Counter counter;\n\n  @Inject\n  public Foo(Registry registry) {\n    counter = registry.counter(\"foo\");\n  }\n\n  public void doSomething() {\n    counter.increment();\n  }\n}\n

    Tests will typically want to use an isolated instance of the DefaultRegistry.

    "},{"location":"spectator/lang/java/testing/#simple-test","title":"Simple Test","text":"

    A basic standalone test class would look something like:

    public class FooTest {\n\n  private Registry registry;\n  private Foo foo;\n\n  @Before\n  public void init() {\n    registry = new DefaultRegistry();\n    foo = new Foo(registry);\n  }\n\n  @Test\n  public void doSomething() {\n    foo.doSomething();\n    Assert.assertEquals(1, registry.counter(\"foo\").count());\n  }\n}\n
    "},{"location":"spectator/lang/java/testing/#spring-test","title":"Spring Test","text":"

    If using Spring, then you can create a binding for the DefaultRegistry, for example:

    public class FooTest {\n\n  private Registry registry;\n  private Foo foo;\n\n  @Configuration\n  public static class TestConfiguration {\n    @Bean\n    public Registry registry() {\n      return new DefaultRegistry();\n    }\n\n    @Bean\n    public Foo foo(Registry registry) {\n      return new Foo(registry);\n    }\n  }\n\n  private AnnotationConfigApplicationContext createContext() {\n    AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();\n    context.register(TestConfiguration.class);\n    context.refresh();\n    return context;\n  }\n\n  @Test\n  public void doSomething() {\n    try (AnnotationConfigApplicationContext context = createContext()) {\n      Foo foo = context.getBean(Foo.class);\n      foo.doSomething();\n\n      Registry registry = context.getBean(Registry.class);\n      Assert.assertEquals(1, registry.counter(\"foo\").count());\n    }\n  }\n}\n
    "},{"location":"spectator/lang/java/testing/#exceptions","title":"Exceptions","text":"

    By default, for most user errors Spectator will log a warning rather than throw an exception. The rationale is that users do not often think about instrumentation and logging code causing an exception and interrupting the control flow of a program. However, for test cases it is recommended to be more aggressive and learn about problems as early as possible. This can be done by setting a system property:

    spectator.api.propagateWarnings=true\n

    Consider an example:

    private static final Id RARE_EXCEPTION_ID = null;\n\npublic void doSomethingImportant() {\n  try {\n    ... do work ...\n  } catch (RareException e) {\n    // There is a bug in the program, an Id is not allowed to be null. In production we do\n    // not want it to throw and interrupt the control flow. Instrumentation should gracefully\n    // degrade.\n    registry.counter(RARE_EXCEPTION_ID).increment();\n\n    // These statements are important to provide context for operating the system\n    // and to ensure the app continues to function properly.\n    LOGGER.error(\"important context for user\", e);\n    properlyHandleException(e);\n  }\n}\n
    "},{"location":"spectator/lang/java/usage/","title":"Usage","text":""},{"location":"spectator/lang/java/usage/#project","title":"Project","text":"
    • Source
    • Javadoc
    • Product Lifecycle: GA
    • Requirements: Java >= 8
    "},{"location":"spectator/lang/java/usage/#install-library","title":"Install Library","text":"
    1. Depend on the API library, which is available in Maven Central. The only transitive dependency is slf4j. For Gradle, the dependency is specified as follows:

      dependencies {\n    compile \"com.netflix.spectator:spectator-api:0.101.0\"\n}\n
    2. Pick a Registry to bind, when initializing the application.

    3. If running at Netflix, see the Netflix Integration section.

    "},{"location":"spectator/lang/java/usage/#instrumenting-code","title":"Instrumenting Code","text":"

    Suppose we have a server and we want to keep track of:

    • Number of requests received with dimensions for breaking down by status code, country, and the exception type if the request fails in an unexpected way.
    • Latency for handling requests.
    • Summary of the response sizes.
    • Current number of active connections on the server.

    Here is some sample code that does that:

    // In the application initialization setup a registry\nRegistry registry = new DefaultRegistry();\nServer s = new Server(registry);\n\npublic class Server {\n  private final Registry registry;\n  private final Id requestCountId;\n  private final Timer requestLatency;\n  private final DistributionSummary responseSizes;\n\n  @Inject\n  public Server(Registry registry) {\n    this.registry = registry;\n\n    // Create a base id for the request count. The id will get refined with\n    // additional dimensions when we receive a request.\n    requestCountId = registry.createId(\"server.requestCount\");\n\n    // Create a timer for tracking the latency. The reference can be held onto\n    // to avoid additional lookup cost in critical paths.\n    requestLatency = registry.timer(\"server.requestLatency\");\n\n    // Create a distribution summary meter for tracking the response sizes.\n    responseSizes = registry.distributionSummary(\"server.responseSizes\");\n\n    // Gauge type that can be sampled. In this case it will invoke the\n    // specified method via reflection to get the value. The registry will\n    // keep a weak reference to the object passed in so that registration will\n    // not prevent garbage collection of the server object.\n    registry.methodValue(\"server.numConnections\", this, \"getNumConnections\");\n  }\n\n  public Response handle(Request req) {\n    final long s = System.nanoTime();\n    requestLatency.record(() -> {\n      try {\n        Response res = doSomething(req);\n\n        // Update the counter id with dimensions based on the request. 
    The\n        // counter will then be looked up in the registry which should be\n        // fairly cheap, such as lookup of id object in a ConcurrentHashMap.\n        // However, it is more expensive than having a local variable set\n        // to the counter.\n        final Id cntId = requestCountId\n          .withTag(\"country\", req.country())\n          .withTag(\"status\", res.status());\n        registry.counter(cntId).increment();\n\n        responseSizes.record(res.body().size());\n\n        return res;\n      } catch (Exception e) {\n        final Id cntId = requestCountId\n          .withTag(\"country\", req.country())\n          .withTag(\"status\", \"exception\")\n          .withTag(\"error\", e.getClass().getSimpleName());\n        registry.counter(cntId).increment();\n        throw e;\n      }\n    });\n  }\n\n  public int getNumConnections() {\n    // however we determine the current number of connections on the server\n  }\n}\n
    "},{"location":"spectator/lang/java/usage/#netflix-integration","title":"Netflix Integration","text":"

    When running at Netflix, use the atlas-client library to enable transferring the instrumented data to Atlas. See the appropriate section for the type of project you are working on:

    • Libraries
    • SBN Applications, specifically standalone apps using SBN.
    "},{"location":"spectator/lang/java/usage/#libraries","title":"Libraries","text":"

    For libraries, the only dependency that should be needed is:

    com.netflix.spectator:spectator-api:0.101.0\n

    The bindings to integrate internally should be included with the application. In your code, just inject a Registry, e.g.:

    public class Foo {\n  @Inject\n  public Foo(Registry registry) {\n    ...\n  }\n  ...\n}\n

    See the testing docs for more information about creating a binding to use with tests. Libraries should not install a particular registry. The bindings to use for the Registry should be determined by the application that is using the library. Think of it as being like slf4j where logging configuration is up to the end-user, not the library owner.

    You may want to avoid binding errors if the end-user has not provided a binding for the Spectator registry. For Spring, this can be done by using optional injections, for example:

    // Sample library class\npublic class MyLib {\n  Registry registry;\n\n  @Inject\n  public MyLib(Optional<Registry> registryOpt) {\n    this.registry = registryOpt.orElseGet(NoopRegistry::new);\n  }\n}\n
    "},{"location":"spectator/lang/java/usage/#sbn-applications","title":"SBN Applications","text":"

    Applications should include spring-boot-netflix-starter-metrics which will configure the registry bindings for internal use.

    "},{"location":"spectator/lang/java/ext/jvm-buffer-pools/","title":"Buffer Pools","text":"

    Buffer pools, such as direct byte buffers, can be monitored at a high level using the BufferPoolMXBean provided by the JDK.

    "},{"location":"spectator/lang/java/ext/jvm-buffer-pools/#getting-started","title":"Getting Started","text":"

    To get information about buffer pools in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-buffer-pools/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-buffer-pools/#jvmbuffercount","title":"jvm.buffer.count","text":"

    Gauge showing the current number of distinct buffers.

    Unit: count

    Dimensions:

    • id: type of buffers. Value will be either direct for direct byte buffers or mapped for memory mapped files.
    "},{"location":"spectator/lang/java/ext/jvm-buffer-pools/#jvmbuffermemoryused","title":"jvm.buffer.memoryUsed","text":"

    Gauge showing the current number of bytes used by all buffers.

    Unit: bytes

    Dimensions:

    • id: type of buffers. Value will be either direct for direct byte buffers or mapped for memory mapped files.
    "},{"location":"spectator/lang/java/ext/jvm-classloading/","title":"Class Loading","text":"

    Uses the ClassLoadingMXBean provided by the JDK to monitor the number of classes loaded and unloaded.

    "},{"location":"spectator/lang/java/ext/jvm-classloading/#getting-started","title":"Getting Started","text":"

    To get information about classloading in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-classloading/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-classloading/#jvmclassloadingclassesloaded","title":"jvm.classloading.classesLoaded","text":"

    Counter reporting the number of classes loaded.

    Unit: classes/second

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-classloading/#jvmclassloadingclassesunloaded","title":"jvm.classloading.classesUnloaded","text":"

    Counter reporting the number of classes unloaded.

    Unit: classes/second

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-compilation/","title":"Compilation","text":"

    Uses the CompilationMXBean provided by the JDK to monitor the time spent compiling code, for each compiler name.

    "},{"location":"spectator/lang/java/ext/jvm-compilation/#getting-started","title":"Getting Started","text":"

    To get information about compilation in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-compilation/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-compilation/#jvmcompilationcompilationtime","title":"jvm.compilation.compilationTime","text":"

    Counter reporting the amount of elapsed time spent in compilation. If multiple threads are used for compilation, then this value represents the summation of the time each thread spent in compilation.

    Unit: seconds/second

    Dimensions:

    • compiler: name of the just-in-time (JIT) compiler
    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/","title":"GC Causes","text":"

    The various GC causes aren't well documented. The list provided here comes from the gcCause.cpp file in the jdk and we include some information on what these mean for the application.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#systemgc__","title":"System.gc__","text":"

    Something called System.gc(). If you are seeing this once an hour it is likely related to the RMI GC interval. For more details see:

    • Unexplained System.gc() calls due to Remote Method Invocation (RMI) or explicit garbage collections
    • sun.rmi.dgc.client.gcInterval
    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#fullgcalot","title":"FullGCAlot","text":"

    Most likely you'll never see this value. In debug builds of the jdk there is an option, -XX:+FullGCALot, that will trigger a full GC at a regular interval for testing purposes.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#scavengealot","title":"ScavengeAlot","text":"

    Most likely you'll never see this value. In debug builds of the jdk there is an option, -XX:+ScavengeALot, that will trigger a minor GC at a regular interval for testing purposes.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#allocation_profiler","title":"Allocation_Profiler","text":"

    Prior to java 8 you would see this if running with the -Xaprof setting. It would be triggered just before the jvm exits. The -Xaprof option was removed in java 8.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#jvmtienv_forcegarbagecollection","title":"JvmtiEnv_ForceGarbageCollection","text":"

    Something called the JVM tool interface function ForceGarbageCollection. Look at the -agentlib param to java to see what agents are configured.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#gclocker_initiated_gc","title":"GCLocker_Initiated_GC","text":"

    The GC locker prevents GC from occurring when JNI code is in a critical region. If GC is needed while a thread is in a critical region, then it will allow them to complete, i.e. call the corresponding release function. Other threads will not be permitted to enter a critical region. Once all threads are out of critical regions a GC event will be triggered.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#heap_inspection_initiated_gc","title":"Heap_Inspection_Initiated_GC","text":"

    GC was initiated by an inspection operation on the heap. For example you can trigger this with jmap:

    $ jmap -histo:live <pid>

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#heap_dump_initiated_gc","title":"Heap_Dump_Initiated_GC","text":"

    GC was initiated before dumping the heap. For example you can trigger this with jmap:

    $ jmap -dump:live,format=b,file=heap.out <pid>

    Another common example would be clicking the Heap Dump button on the Monitor tab in VisualVM.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#whitebox_initiated_young_gc","title":"WhiteBox_Initiated_Young_GC","text":"

    Most likely you'll never see this value. Used for testing hotspot, it indicates something called sun.hotspot.WhiteBox.youngGC().

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#no_gc","title":"No_GC","text":"

    Used for CMS to indicate concurrent phases.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#allocation_failure","title":"Allocation_Failure","text":"

    Usually this means that there is an allocation request that is bigger than the available space in young generation and will typically be associated with a minor GC. For G1 this will likely be a major GC and it is more common to see G1_Evacuation_Pause for routine minor collections.

    On linux the jvm will trigger a GC if the kernel indicates there isn't much memory left via mem_notify.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#tenured_generation_full","title":"Tenured_Generation_Full","text":"

    Not used?

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#permanent_generation_full","title":"Permanent_Generation_Full","text":"

    Triggered as a result of an allocation failure in PermGen. Pre java 8.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#metadata_gc_threshold","title":"Metadata_GC_Threshold","text":"

    Triggered as a result of an allocation failure in Metaspace. Metaspace, which replaced PermGen, was added in java 8.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#cms_generation_full","title":"CMS_Generation_Full","text":"

    Not used?

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#cms_initial_mark","title":"CMS_Initial_Mark","text":"

    Initial mark phase of CMS, for more details see Phases of CMS. Unfortunately it doesn't appear to be reported via the mbeans and we just get No_GC.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#cms_final_remark","title":"CMS_Final_Remark","text":"

    Remark phase of CMS, for more details see Phases of CMS. Unfortunately it doesn't appear to be reported via the mbeans and we just get No_GC.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#cms_concurrent_mark","title":"CMS_Concurrent_Mark","text":"

    Concurrent mark phase of CMS, for more details see Phases of CMS. Unfortunately it doesn't appear to be reported via the mbeans and we just get No_GC.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#old_generation_expanded_on_last_scavenge","title":"Old_Generation_Expanded_On_Last_Scavenge","text":"

    Not used?

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#old_generation_too_full_to_scavenge","title":"Old_Generation_Too_Full_To_Scavenge","text":"

    Not used?

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#ergonomics","title":"Ergonomics","text":"

    This indicates you are using the adaptive size policy (-XX:+UseAdaptiveSizePolicy), which is on by default for recent versions when using the parallel collector (-XX:+UseParallelGC). For more details see The Why of GC Ergonomics.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#g1_evacuation_pause","title":"G1_Evacuation_Pause","text":"

    An evacuation pause is the most common young gen cause for G1 and indicates that it is copying live objects from one set of regions, young and sometimes young + old, to another set of regions. For more details see Understanding G1 GC Logs.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#g1_humongous_allocation","title":"G1_Humongous_Allocation","text":"

    A humongous allocation is one where the size is greater than 50% of the G1 region size. Before a humongous allocation the jvm checks if it should do a routine evacuation pause without regard to the actual allocation size, but if triggered due to this check the cause will be listed as humongous allocation. This cause is also used for any collections used to free up enough space for the allocation.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#last_ditch_collection","title":"Last_ditch_collection","text":"

    For perm gen (java 7 or earlier) and metaspace (java 8+) a last ditch collection will be triggered if an allocation fails and the memory pool cannot be expanded.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#illegal_value_-last_gc_cause-_illegal_value","title":"ILLEGAL_VALUE_-last_gc_cause-_ILLEGAL_VALUE","text":"

    Included for completeness, but you should never see this value.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#unknown_gccause","title":"unknown_GCCause","text":"

    Included for completeness, but you should never see this value.

    "},{"location":"spectator/lang/java/ext/jvm-gc/","title":"Garbage Collection","text":"

    The GC module registers with the notification emitter of the GarbageCollectorMXBean to provide some basic GC logging and metrics.

    • Getting started
    • Logging
    • Metrics
    • Alerting
    "},{"location":"spectator/lang/java/ext/jvm-gc/#getting-started","title":"Getting Started","text":"

    For using it internally at Netflix, see the Java Usage guide, otherwise keep reading this section.

    "},{"location":"spectator/lang/java/ext/jvm-gc/#requirements","title":"Requirements","text":"

    This library relies on the notification emitter added in 7u4, but there are known issues prior to 7u40. There is also a regression impacting Java 9 and higher, see #502 and JDK-8196325 for more information. For G1, it is recommended to be on the latest version available.

    "},{"location":"spectator/lang/java/ext/jvm-gc/#dependencies","title":"Dependencies","text":"
    com.netflix.spectator:spectator-ext-gc:0.101.0\n
    "},{"location":"spectator/lang/java/ext/jvm-gc/#start-reporting","title":"Start Reporting","text":"

    Then in the initialization for the application:

    import com.netflix.spectator.gc.GcLogger;\n...\n// Keep a single instance of the logger\nGcLogger gc = new GcLogger();\ngc.start(null);\n
    "},{"location":"spectator/lang/java/ext/jvm-gc/#logging","title":"Logging","text":"

    After GC events, a DEBUG level log message will get reported using slf4j. This makes it easy to see GC events in the context of other log messages for the application. The logger name is com.netflix.spectator.gc.GcLogger and the message will look like:

    ${GC_TYPE}: ${COLLECTOR_NAME}, id=${N}, at=${START_TIME}, duration=${T}ms,\ncause=[${CAUSE}], ${TOTAL_USAGE_BEFORE} => ${TOTAL_USAGE_AFTER} / ${MAX_SIZE}\n(${PERCENT_USAGE_BEFORE} => ${PERCENT_USAGE_AFTER})\n

    The id can be used to verify events were not skipped or correlate with other sources like detailed GC logs. See GC causes for more details on the possible causes.

    Sample:

    2014-08-31 02:02:24,724  DEBUG [com.netflix.spectator.gc.GcLogger] YOUNG: ParNew,\nid=5281, at=Sun Aug 31 02:02:24 UTC 2014, duration=2ms, cause=[Allocation Failure],\n0.4G => 0.3G / 1.8G (24.3% => 16.6%)\n
    "},{"location":"spectator/lang/java/ext/jvm-gc/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgcallocationrate","title":"jvm.gc.allocationRate","text":"

    The allocation rate measures how fast the application is allocating memory. It is a counter that is incremented after a GC event by the amount youngGen.sizeBeforeGC.

    Technically, right now it is:

    youngGen.sizeBeforeGC - youngGen.sizeAfterGC\n

    However, youngGen.sizeAfterGC should be 0 and thus the size of young gen before the GC is the amount allocated since the previous GC event.

    Unit: bytes/second

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgcpromotionrate","title":"jvm.gc.promotionRate","text":"

    The promotion rate measures how fast data is being moved from young generation into the old generation.

    It is a counter that is incremented after a GC event by the amount:

    abs(oldGen.sizeAfterGC - oldGen.sizeBeforeGC)\n

    Unit: bytes/second

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgclivedatasize","title":"jvm.gc.liveDataSize","text":"

    The live data size is the size of the old generation after a major GC.

    The image below shows how the live data size view compares to a metric showing the current size of the memory pool:

    Unit: bytes

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgcmaxdatasize","title":"jvm.gc.maxDataSize","text":"

    Maximum size for the old generation. Primary use-case is for gaining perspective on the live data size.

    Unit: bytes

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgcpause","title":"jvm.gc.pause","text":"

    Pause time for a GC event. All of the values reported are stop the world pauses.

    Unit: seconds

    Dimensions:

    • action: action performed by the garbage collector (getGcAction). There is no guarantee, but the typical values seen are end_of_major_GC and end_of_minor_GC.
    • cause: cause that instigated GC (getGcCause). For an explanation of common causes see the GC causes page.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgcconcurrentphasetime","title":"jvm.gc.concurrentPhaseTime","text":"

    Time spent in concurrent phases of CMS pauses.

    Unit: seconds

    Dimensions:

    • action: action performed by the garbage collector (getGcAction). There is no guarantee, but the typical values seen are end_of_major_GC and end_of_minor_GC.
    • cause: cause that instigated GC (getGcCause). For an explanation of common causes see the GC causes page.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#alerting","title":"Alerting","text":"

    This section assumes the data is available in Atlas, but users of other systems should be able to take the idea and make it work. For all of these alerts it is recommended to check them per instance. At Netflix that can be done by selecting the option in the alert UI:

    "},{"location":"spectator/lang/java/ext/jvm-gc/#max-pause-time","title":"Max Pause Time","text":"

    Example to trigger an alert if the pause time exceeds 500 milliseconds:

    name,jvm.gc.pause,:eq,\nstatistic,max,:eq,\n:and,\n:max,(,cause,),:by,\n0.5,:gt,\n$cause,:legend\n
    "},{"location":"spectator/lang/java/ext/jvm-gc/#heap-pressure","title":"Heap Pressure","text":"

    Example to trigger an alert if the live data size is over 70% of the heap:

    name,jvm.gc.liveDataSize,:eq,:max,\nname,jvm.gc.maxDataSize,:eq,:max,\n:div,100,:mul,\n70,:gt,\npercentUsed,:legend\n
    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/","title":"Memory Pools","text":"

    Uses the MemoryPoolMXBean provided by the JDK to monitor the sizes of java memory spaces such as perm gen, eden, old gen, etc.

    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/#getting-started","title":"Getting Started","text":"

    To get information about memory pools in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-memory-pools/#jvmmemoryused","title":"jvm.memory.used","text":"

    Gauge reporting the current amount of memory used. For the young and old gen pools this metric will typically have a sawtooth pattern. For alerting or detecting memory pressure the live data size is probably a better option.

    Unit: bytes

    Dimensions:

    • See metric dimensions.
    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/#jvmmemorycommitted","title":"jvm.memory.committed","text":"

    Gauge reporting the current amount of memory committed. From the javadocs, committed is:

    The amount of memory (in bytes) that is guaranteed to be available for use by the Java virtual machine. The amount of committed memory may change over time (increase or decrease). The Java virtual machine may release memory to the system and committed could be less than init. committed will always be greater than or equal to used.

    Unit: bytes

    Dimensions:

    • See metric dimensions.
    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/#jvmmemorymax","title":"jvm.memory.max","text":"

    Gauge reporting the max amount of memory that can be used. From the javadocs, max is:

    The maximum amount of memory (in bytes) that can be used for memory management. Its value may be undefined. The maximum amount of memory may change over time if defined. The amount of used and committed memory will always be less than or equal to max if max is defined. A memory allocation may fail if it attempts to increase the used memory such that used > committed even if used <= max would still be true (for example, when the system is low on virtual memory).

    Unit: bytes

    Dimensions:

    • See metric dimensions.
    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/#metric-dimensions","title":"Metric Dimensions","text":"

    All memory metrics have the following dimensions:

    • id: name of the memory pool being reported. The names of the pools vary depending on the garbage collector algorithm being used.
    • memtype: type of memory. It has two possible values: HEAP and NON_HEAP. For more information see the javadocs for MemoryType.
    "},{"location":"spectator/lang/java/ext/jvm-safepoint/","title":"Safepoint","text":"

    Uses the Hotspot mbean to access the time spent in and getting to safepoints.

    "},{"location":"spectator/lang/java/ext/jvm-safepoint/#getting-started","title":"Getting Started","text":"

    To get information about compilation in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-safepoint/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-safepoint/#jvmhotspotsafepointtime","title":"jvm.hotspot.safepointTime","text":"

    Timer reporting the amount of time the application has been stopped for safepoint operations.

    Unit: seconds

    "},{"location":"spectator/lang/java/ext/jvm-safepoint/#jvmhotspotsafepointsynctime","title":"jvm.hotspot.safepointSyncTime","text":"

    Timer reporting the amount of time spent getting to safepoints.

    Unit: seconds

    "},{"location":"spectator/lang/java/ext/jvm-threads/","title":"Threads","text":"

    Uses the ThreadMXBean provided by the JDK to monitor the number of active threads and threads started.

    "},{"location":"spectator/lang/java/ext/jvm-threads/#getting-started","title":"Getting Started","text":"

    To get information about threads in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-threads/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-threads/#jvmthreadthreadcount","title":"jvm.thread.threadCount","text":"

    Gauge reporting the number of active threads.

    Unit: threads

    Dimensions:

    • id: thread category, either daemon or non-daemon
    "},{"location":"spectator/lang/java/ext/jvm-threads/#jvmthreadthreadsstarted","title":"jvm.thread.threadsStarted","text":"

    Counter reporting the number of threads started.

    Unit: threads/second

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/log4j1/","title":"Log4j1 Appender","text":"

    Custom appender for log4j1 to track the number of log messages reported.

    Note

    Log4j 1.x has reached end of life and is no longer supported by Apache. This extension is provided for some users that have difficulty moving to a supported version of log4j.

    "},{"location":"spectator/lang/java/ext/log4j1/#getting-started","title":"Getting Started","text":"

    To use it simply add a dependency:

    com.netflix.spectator:spectator-ext-log4j1:0.101.0\n

    Then in your log4j configuration specify the com.netflix.spectator.log4j.SpectatorAppender. In a properties file it would look something like:

    log4j.rootLogger=ALL, A1\nlog4j.appender.A1=com.netflix.spectator.log4j.SpectatorAppender\n
    "},{"location":"spectator/lang/java/ext/log4j1/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/log4j1/#log4jnummessages","title":"log4j.numMessages","text":"

    Counters showing the number of messages that have been passed to the appender.

    Unit: messages/second

    Dimensions:

    • loglevel: standard log level of the events.
    "},{"location":"spectator/lang/java/ext/log4j1/#log4jnumstacktraces","title":"log4j.numStackTraces","text":"

    Counter for the number of messages with stack traces written to the logs.

    Unit: messages/second

    Dimensions:

    • loglevel: standard log level of the events.
    • exception: simple class name for the exception that was thrown.
    • file: file name for where the exception was thrown.
    "},{"location":"spectator/lang/java/ext/log4j2/","title":"Log4j2 Appender","text":"

    Custom appender for log4j2 to track the number of log messages reported.

    "},{"location":"spectator/lang/java/ext/log4j2/#getting-started","title":"Getting Started","text":"

    To use it simply add a dependency:

    com.netflix.spectator:spectator-ext-log4j2:0.101.0\n

    Then in your application initialization:

    Registry registry = ...\nSpectatorAppender.addToRootLogger(\n    registry,             // Registry to use\n    \"spectator\",          // Name for the appender\n    false);               // Should stack traces be ignored?\n

    This will add the appender to the root logger and register a listener so it will get re-added if the configuration changes. You can also use the appender by specifying it in the log4j2 configuration, but this will cause some of the loggers in Spectator to get created before log4j is properly initialized and result in some lost log messages. With that caveat in mind, if you need the additional flexibility of using the configuration then specify the Spectator appender:

    <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration monitorInterval=\"5\" status=\"warn\">\n  <Appenders>\n    <Spectator name=\"root\"/>\n  </Appenders>\n  <Loggers>\n    <Root level=\"debug\">\n      <AppenderRef ref=\"root\"/>\n    </Root>\n  </Loggers>\n</Configuration>\n
    "},{"location":"spectator/lang/java/ext/log4j2/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/log4j2/#log4jnummessages","title":"log4j.numMessages","text":"

    Counters showing the number of messages that have been passed to the appender.

    Unit: messages/second

    Dimensions:

    • appender: name of the spectator appender.
    • loglevel: standard log level of the events.
    "},{"location":"spectator/lang/java/ext/log4j2/#log4jnumstacktraces","title":"log4j.numStackTraces","text":"

    Counter for the number of messages with stack traces written to the logs. This will only be collected if the ignoreExceptions flag is set to false for the appender.

    Unit: messages/second

    Dimensions:

    • appender: name of the spectator appender.
    • loglevel: standard log level of the events.
    • exception: simple class name for the exception that was thrown.
    • file: file name for where the exception was thrown.
    "},{"location":"spectator/lang/java/ext/placeholders/","title":"Placeholders","text":"

    The placeholders extension allows for identifiers to be created with dimensions that will get filled in based on the context when an activity occurs. The primary use-cases are to support:

    1. Optional dimensions that can be conditionally enabled.
    2. Pulling dimensions from another context such as a thread local store. This can make it easier to share them across various parts of the code.
    "},{"location":"spectator/lang/java/ext/placeholders/#dependencies","title":"Dependencies","text":"

    To use the placeholders support add a dependency on:

    com.netflix.spectator:spectator-ext-placeholders:0.101.0\n
    "},{"location":"spectator/lang/java/ext/placeholders/#usage","title":"Usage","text":"

    Placeholder support is available for activity based types including counters, timers, and distribution summaries. To get started create a PlaceholderFactory from the registry:

    PlaceholderFactory factory = PlaceholderFactory.from(registry);\n

    Then use the factory to create an identifier using a TagFactory to dynamically fetch the value for a given dimension when some activity occurs. Suppose we want to use a dynamic configuration library such as Archaius to conditionally enable a dimension with high cardinality:

    public class Server {\n\n  private final Context context;\n  private final Counter rps;\n\n  public Server(Context context, PropertyFactory props, Registry registry) {\n    this.context = context;\n\n    // Property that can be dynamically updated to indicate whether or not\n    // detailed dimensions should be added to metrics.\n    Property<Boolean> enabled = props\n      .getProperty(\"server.detailedMetricsEnabled\")\n      .asBoolean(false);\n\n    // Factory for creating instances of the counter using placeholders\n    PlaceholderFactory factory = PlaceholderFactory.from(registry);\n\n    // Create the underlying id with 4 possible dimensions:\n    // *  method and status - low cardinality and always added if available\n    //    in the context.\n    // *  geo and device - high cardinality and only available if the property\n    //    to enable detailed metrics is set to true.\n    PlaceholderId rpsId = factory.createId(\"server.requests\")\n      .withTagFactory(TagFactory.from(\"method\", context::getMethod))\n      .withTagFactory(TagFactory.from(\"status\", context::getStatus))\n      .withTagFactory(new DetailedDimension(\"geo\", enabled, context::getGeo))\n      .withTagFactory(new DetailedDimension(\"device\", enabled, context::getDevice));\n    rps = factory.counter(rpsId);\n  }\n\n  public Response handle(Request request) {\n    fillInContext(request);\n    Response response = process(request);\n    fillInContext(response);\n\n    // Update the counter, the placeholders will be resolved when the activity, in\n    // this case the increment is called.\n    rps.increment();\n    return response;\n  }\n\n  // Tag factory that can be controlled with an enabled property.\n  private static class DetailedDimension implements TagFactory {\n\n    private final String name;\n    private final Supplier<String> valueFunc;\n\n    DetailedDimension(String name, Property<Boolean> enabled, Supplier<String> valueFunc) {\n      this.name = name;\n      
this.enabled = enabled;\n      this.valueFunc = valueFunc;\n    }\n\n    @Override public String name() {\n      return name;\n    }\n\n    @Override public Tag createTag() {\n      return enabled.get()\n          ? new BasicTag(name, valueFunc.get())\n          : null;\n    }\n  }\n}\n
    "},{"location":"spectator/lang/java/ext/thread-pools/","title":"Thread Pools","text":"

    Java's ThreadPoolExecutor exposes several properties that are useful to monitor to assess the health, performance, and configuration of the pool.

    "},{"location":"spectator/lang/java/ext/thread-pools/#getting-started","title":"Getting Started","text":"

    To report thread pool metrics, one can attach a ThreadPoolMonitor in the following manner:

    import com.netflix.spectator.api.patterns.ThreadPoolMonitor;\n\nThreadPoolMonitor.attach(registry, myThreadPoolExecutor, \"my-thread-pool\");\n

    The thread pool's properties will be polled regularly in the background and will report metrics to the provided registry. The third parameter will be added to each metric as an id dimension, if provided. However, if the value is null or an empty string, then a default will be used as the id.

    "},{"location":"spectator/lang/java/ext/thread-pools/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/thread-pools/#threadpooltaskcount","title":"threadpool.taskCount","text":"

    Counter of the total number of tasks that have been scheduled.

    Unit: tasks/second

    Data Source: ThreadPoolExecutor#getTaskCount()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolcompletedtaskcount","title":"threadpool.completedTaskCount","text":"

    Counter of the total number of tasks that have completed.

    Unit: tasks/second

    Data Source: ThreadPoolExecutor#getCompletedTaskCount()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolcurrentthreadsbusy","title":"threadpool.currentThreadsBusy","text":"

    Gauge showing the current number of threads actively doing work.

    Unit: count

    Data Source: ThreadPoolExecutor#getActiveCount()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolmaxthreads","title":"threadpool.maxThreads","text":"

    Gauge showing the current maximum number of threads configured for the pool.

    Unit: count

    Data Source: ThreadPoolExecutor#getMaximumPoolSize()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolpoolsize","title":"threadpool.poolSize","text":"

    Gauge showing the current size of the pool.

    Unit: count

    Data Source: ThreadPoolExecutor#getPoolSize()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolcorepoolsize","title":"threadpool.corePoolSize","text":"

    Gauge showing the current maximum number of core threads configured for the pool.

    Unit: count

    Data Source: ThreadPoolExecutor#getCorePoolSize()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolqueuesize","title":"threadpool.queueSize","text":"

    Gauge showing the current number of tasks queued for execution.

    Unit: count

    Data Source: ThreadPoolExecutor#getQueue().size()

    "},{"location":"spectator/lang/java/meters/counter/","title":"Java Counters","text":"

    Counters are created using the Registry, which will be set up as part of application initialization. For example:

    public class Queue {\n\n  private final Counter insertCounter;\n  private final Counter removeCounter;\n  private final QueueImpl impl;\n\n  @Inject\n  public Queue(Registry registry) {\n    insertCounter = registry.counter(\"queue.insert\");\n    removeCounter = registry.counter(\"queue.remove\");\n    impl = new QueueImpl();\n  }\n

    Then call increment when an event occurs:

      public void insert(Object obj) {\n    insertCounter.increment();\n    impl.insert(obj);\n  }\n\n  public Object remove() {\n    if (impl.nonEmpty()) {\n      removeCounter.increment();\n      return impl.remove();\n    } else {\n      return null;\n    }\n  }\n

    Optionally, an amount can be passed in when calling increment. This is useful when a collection of events happen together.

      public void insertAll(Collection<Object> objs) {\n    insertCounter.increment(objs.size());\n    impl.insertAll(objs);\n  }\n}\n
    "},{"location":"spectator/lang/java/meters/dist-summary/","title":"Java Distribution Summaries","text":"

    Distribution Summaries are created using the Registry, which will be setup as part of application initialization. For example:

    public class Server {\n\n  private final DistributionSummary requestSize;\n\n  @Inject\n  public Server(Registry registry) {\n    requestSize = registry.distributionSummary(\"server.requestSize\");\n  }\n

    Then call record when an event occurs:

      public Response handle(Request request) {\n    requestSize.record(request.sizeInBytes());\n  }\n}\n
    "},{"location":"spectator/lang/java/meters/gauge/","title":"Java Gauges","text":""},{"location":"spectator/lang/java/meters/gauge/#polled-gauges","title":"Polled Gauges","text":"

    The most common use of Gauges is by registering a hook with Spectator, so that it will poll the values in the background. This is done by using the PolledMeter helper class.

    A Polled Gauge is registered by passing in an id, a reference to the object, and a function to get or compute a numeric value based on the object. Note that a Gauge should only be registered once, not on each update. Consider this example of a web server tracking the number of connections:

    class HttpServer {\n  // Tracks the number of current connections to the server\n  private AtomicInteger numConnections;\n\n  public HttpServer(Registry registry) {\n    numConnections = PolledMeter.using(registry)\n      .withName(\"server.numConnections\")\n      .monitorValue(new AtomicInteger(0));\n  }\n\n  public void onConnectionCreated() {\n    numConnections.incrementAndGet();\n    ...\n  }\n\n  public void onConnectionClosed() {\n    numConnections.decrementAndGet();\n    ...\n  }\n\n  ...\n}\n

    The Spectator Registry will keep a weak reference to the object. If the object is garbage collected, then it will automatically drop the registration. In the example above, the Registry will have a weak reference to numConnections and the server instance will have a strong reference to numConnections. If the server instance goes away, then the Gauge will as well.

    When multiple Gauges are registered with the same id, the reported value will be the sum of the matches. For example, if multiple instances of the HttpServer class were created on different ports, then the value server.numConnections would be the total number of connections across all server instances. If a different behavior is desired, then ensure your usage does not perform multiple registrations.

    There are several different ways to register a Gauge:

    "},{"location":"spectator/lang/java/meters/gauge/#using-number","title":"Using Number","text":"

    A Gauge can also be created based on an implementation of Number. Note the Number implementation should be thread-safe. For example:

    AtomicInteger size = new AtomicInteger();\nPolledMeter.using(registry)\n  .withName(\"queue.size\")\n  .monitorValue(size);\n

    The call will return the Number so the registration can be inline on the assignment:

    AtomicInteger size = PolledMeter.using(registry)\n  .withName(\"queue.size\")\n  .monitorValue(new AtomicInteger());\n

    Updates to the value are performed by updating the Number instance directly.

    "},{"location":"spectator/lang/java/meters/gauge/#using-lambda","title":"Using Lambda","text":"

    Specify a lambda that takes the object as parameter.

    public class Queue {\n\n  @Inject\n  public Queue(Registry registry) {\n    PolledMeter.using(registry)\n      .withName(\"queue.size\")\n      .monitorValue(this, Queue::size);\n  }\n\n  ...\n}\n

    Warning

    Be careful to avoid creating a reference to the object in the lambda. It will prevent garbage collection and can lead to a memory leak in the application. For example, by calling size without using the passed in object there will be a reference to this:

    PolledMeter.using(registry)\n  .withName(\"queue.size\")\n  .monitorValue(this, obj -> size());\n
    "},{"location":"spectator/lang/java/meters/gauge/#collection-sizes","title":"Collection Sizes","text":"

    For classes that implement Collection or Map, there are helpers:

    Queue queue = new LinkedBlockingQueue();\nPolledMeter.using(registry)\n  .withName(\"queue.size\")\n  .monitorSize(queue);\n\nMap<String, String> cache = new ConcurrentMap<>();\nPolledMeter.using(registry)\n  .withName(\"cache.size\")\n  .monitorSize(cache);\n
    "},{"location":"spectator/lang/java/meters/gauge/#monotonic-counters","title":"Monotonic Counters","text":"

    A common technique used by some libraries is to expose a monotonically increasing counter that represents the number of events since the system was initialized. An example of that in the JDK is ThreadPoolExecutor.getCompletedTaskCount, which returns the number of completed tasks on the thread pool.

    For sources like this, the monitorMonotonicCounter method can be used:

    // For an implementation of Number\nLongAdder tasks = new LongAdder();\nPolledMeter.using(registry)\n  .withName(\"pool.completedTasks\")\n  .monitorMonotonicCounter(tasks);\n\n// Or using a lambda\nThreadPoolExecutor executor = ...\nPolledMeter.using(registry)\n  .withName(\"pool.completedTasks\")\n  .monitorMonotonicCounter(executor, ThreadPoolExecutor::getCompletedTaskCount);\n

    For thread pools specifically, there are better options for getting standard metrics. See the docs for the Thread Pools extension for more information.

    "},{"location":"spectator/lang/java/meters/gauge/#active-gauges","title":"Active Gauges","text":"

    Gauges can also be set directly by the user. In this mode, the user is responsible for regularly updating the value of the Gauge by calling set. Looking at the HttpServer example, with an active gauge, it would look like:

    class HttpServer {\n  // Tracks the number of current connections to the server\n  private AtomicInteger numConnections;\n  private Gauge gauge;\n\n  public HttpServer(Registry registry) {\n    numConnections = new AtomicInteger();\n    gauge = registry.gauge(\"server.numConnections\");\n    gauge.set(numConnections.get());\n  }\n\n  public void onConnectionCreated() {\n    numConnections.incrementAndGet();\n    gauge.set(numConnections.get());\n    ...\n  }\n\n  public void onConnectionClosed() {\n    numConnections.decrementAndGet();\n    gauge.set(numConnections.get());\n    ...\n  }\n\n  ...\n}\n
    "},{"location":"spectator/lang/java/meters/percentile-timer/","title":"Java Percentile Timers","text":"

    Note: Percentile timers generate a metric per bucket in the histogram. Create instances once per ID and reuse them as needed. Avoid adding tags with high cardinality as that increases the cardinality of the metric. If at all possible, use a Timer instead.

    To get started, create an instance using the Registry:

    public class Server {\n\n  private final Registry registry;\n  private final PercentileTimer requestLatency;\n\n  @Inject\n  public Server(Registry registry) {\n    this.registry = registry;\n    requestLatency = PercentileTimer.builder(registry)\n        .withId(registry.createId(\"server.request.latency\", \"status\", \"200\"))\n        .build();\n

    Then wrap the call you need to measure, preferably using a lambda:

      public Response handle(Request request) {\n    return requestLatency.recordRunnable(() -> handleImpl(request));\n  }\n

    The lambda variants will handle exceptions for you and ensure the record happens as part of a finally block using the monotonic time. It could also have been done more explicitly like:

      public Response handle(Request request) {\n    final long start = registry.clock().monotonicTime();\n    try {\n      return handleImpl(request);\n    } finally {\n      final long end = registry.clock().monotonicTime();\n      requestLatency.record(end - start, TimeUnit.NANOSECONDS);\n    }\n  }\n

    This example uses the Clock from the Registry, which can be useful for testing, if you need to control the timing. In actual usage, it will typically get mapped to the system clock. It is recommended to use a monotonically increasing source for measuring the times, to avoid occasionally having bogus measurements due to time adjustments. For more information, see the Clock documentation.

    "},{"location":"spectator/lang/java/meters/timer/","title":"Java Timers","text":""},{"location":"spectator/lang/java/meters/timer/#timer","title":"Timer","text":"

    To get started, create an instance using the Registry:

    public class Server {\n\n  private final Registry registry;\n  private final Timer requestLatency;\n\n  @Inject\n  public Server(Registry registry) {\n    this.registry = registry;\n    requestLatency = registry.timer(\"server.requestLatency\");\n  }\n

    Then wrap the call you need to measure, preferably using a lambda:

      public Response handle(Request request) {\n    return requestLatency.recordRunnable(() -> handleImpl(request));\n  }\n

    The lambda variants will handle exceptions for you and ensure the record happens as part of a finally block using the monotonic time. It could also have been done more explicitly like:

      public Response handle(Request request) {\n    final long start = registry.clock().monotonicTime();\n    try {\n      return handleImpl(request);\n    } finally {\n      final long end = registry.clock().monotonicTime();\n      requestLatency.record(end - start, TimeUnit.NANOSECONDS);\n    }\n  }\n

    This example uses the Clock from the Registry, which can be useful for testing, if you need to control the timing. In actual usage, it will typically get mapped to the system clock. It is recommended to use a monotonically increasing source for measuring the times, to avoid occasionally having bogus measurements due to time adjustments. For more information, see the Clock documentation.

    "},{"location":"spectator/lang/java/meters/timer/#longtasktimer","title":"LongTaskTimer","text":"

    To get started, create an instance using the Registry:

    import com.netflix.spectator.api.patterns.LongTaskTimer;\n\npublic class MetadataService {\n\n  private final LongTaskTimer metadataRefresh;\n\n  @Inject\n  public MetadataService(Registry registry) {\n    metadataRefresh = LongTaskTimer.get(\n        registry, registry.createId(\"metadata.refreshDuration\"));\n    // setup background thread to call refresh()\n  }\n\n  private void refresh() {\n    final int id = metadataRefresh.start();\n    try {\n      refreshImpl();\n    } finally {\n      metadataRefresh.stop(id);\n    }\n  }\n

    The id value returned by the start method is used to keep track of a particular task being measured by the LongTaskTimer. It must be stopped using the provided id. Note that unlike a regular Timer that does not do anything until the final duration is recorded, a LongTaskTimer will report as two Gauges:

    • duration: total duration spent within all currently running tasks.
    • activeTasks: number of currently running tasks.

    This means that you can see what is happening while the task is running, but you need to keep in mind:

    • The meter id is fixed before the task begins. There is no way to change tags based on the run, e.g., update a different Timer, if an exception is thrown.
    • Being a Gauge, it is inappropriate for short tasks. In particular, Gauges are sampled and if it is not sampled during the execution, or the sampling period is a significant subset of the expected duration, then the duration value will not be meaningful.
    "},{"location":"spectator/lang/java/registry/metrics3/","title":"Metrics3 Registry","text":"

    Registry that uses metrics3 as the underlying implementation. To use the metrics registry, add a dependency on the spectator-reg-metrics3 library. For gradle:

    com.netflix.spectator:spectator-reg-metrics3:0.101.0\n

    Then when initializing the application, use the MetricsRegistry. For more information see the metrics3 example.

    "},{"location":"spectator/lang/java/registry/overview/","title":"Registry","text":"

    The Registry is the main class for managing a set of meters. A Meter is a class for collecting a set of measurements about your application.

    "},{"location":"spectator/lang/java/registry/overview/#choose-implementation","title":"Choose Implementation","text":"

    The core Spectator library, spectator-api, comes with the following Registry implementations:

    Class Dependency Description DefaultRegistry spectator-api Updates local counters, frequently used with unit tests. NoopRegistry spectator-api Does nothing, tries to make operations as cheap as possible. This implementation is typically used to help understand the overhead being created due to instrumentation. It can also be useful in testing to help ensure that no side effects were introduced where the instrumentation is now needed in order for the application to function properly. MetricsRegistry spectator-reg-metrics3 Map to metrics3 library. This implementation is typically used for reporting to local files, JMX, or other backends like Graphite. Note that it uses a hierarchical naming scheme rather than the dimensional naming used by Spectator, so the names will get flattened when mapped to this Registry.

    It is recommended for libraries to write code against the Registry interface and allow the implementation to get injected by the user of the library. The simplest way is to accept the Registry via the constructor, for example:

    public class HttpServer {\n  public HttpServer(Registry registry) {\n    // use registry to collect measurements\n  }\n}\n

    The user of the class can then provide the implementation:

    Registry registry = new DefaultRegistry();\nHttpServer server = new HttpServer(registry);\n

    More complete examples can be found on the testing page or in the spectator-examples repo.

    "},{"location":"spectator/lang/java/registry/overview/#working-with-ids","title":"Working with Ids","text":"

    Spectator is primarily intended for collecting data for dimensional time series backends like Atlas. The ids used for looking up a Meter in the Registry consist of a name and set of tags. Ids will be consumed many times by users after the data has been reported, so they should be chosen with some care and thought about how they will get used. See the conventions page for some general guidelines.

    Ids are created via the Registry, for example:

    Id id = registry.createId(\"server.requestCount\");\n

    The ids are immutable, so they can be freely passed around and used in a concurrent context. Tags can be added when an id is created:

    Id id = registry.createId(\n    \"server.requestCount\",\n    \"status\", \"2xx\",\n    \"method\", \"GET\"\n);\n

    Or by using withTag and withTags on an existing id:

    public class HttpServer {\n  private final Id baseId;\n\n  public HttpServer(Registry registry) {\n    baseId = registry.createId(\"server.requestCount\");\n  }\n\n  private void handleRequestComplete(HttpRequest req, HttpResponse res) {\n    // Remember Id is immutable, withTags will return a copy with the\n    // the additional metadata\n    Id reqId = baseId.withTags(\n      \"status\", res.getStatus(),\n      \"method\", req.getMethod().name());\n    registry.counter(reqId).increment();\n  }\n\n  private void handleRequestError(HttpRequest req, Throwable t) {\n    // Can also be added individually using `withTag`. However, it is better\n    // for performance to batch modifications using `withTags`.\n    Id reqId = baseId\n      .withTag(\"error\",  t.getClass().getSimpleName())\n      .withTag(\"method\", req.getMethod().name());\n    registry.counter(reqId).increment();\n  }\n}\n
    "},{"location":"spectator/lang/java/registry/overview/#collecting-measurements","title":"Collecting Measurements","text":"

    Once you have an id, the Registry can be used to get an instance of a Meter to record a measurement. Meters can roughly be categorized in two groups:

    "},{"location":"spectator/lang/java/registry/overview/#active","title":"Active","text":"

    Active Meters are ones that are called directly when some event occurs. There are three basic types supported:

    • Counters measure how often something is occurring. This will be reported to backend systems as a rate-per-second. For example, the number of requests processed by a web server.
    • Timers measure how long something took. For example, the latency of requests processed by a web server.
    • Distribution Summaries measure the size of something. For example, the entity sizes for requests processed by a web server.
    "},{"location":"spectator/lang/java/registry/overview/#passive","title":"Passive","text":"

    Passive Meters are ones where the Registry has a reference to get the value when needed. For example, the number of current connections on a web server or the number of threads that are currently in use. These will be Gauges.

    "},{"location":"spectator/lang/java/registry/overview/#global-registry","title":"Global Registry","text":"

    There are some use-cases where injecting the Registry is not possible or is too cumbersome. The main example from the core Spectator libraries is the log4j appender. The Global Registry is useful there because logging is often initialized before any other systems and Spectator itself uses logging via the slf4j api which is quite likely being bound to log4j when the appender is being used. By using the Global Registry, the logging initialization can proceed before the Spectator initialization in the application. Though any measurements taken before a Registry instance has been added will be lost.

    The Global Registry is accessed using:

    Registry registry = Spectator.globalRegistry();\n

    By default, it will not record anything. For a specific registry instance you can choose to configure it to work with the Global Registry by calling add:

    public void init() {\n  Registry registry = // Choose an appropriate implementation\n\n  // Add it to the global registry so it will receive\n  // any activity on the global registry\n  Spectator.globalRegistry().add(registry);\n}\n

    Any measurements taken while no Registries are added to the global instance will be lost. If multiple Registries are added, all will receive updates made to the Global Registry.

    "},{"location":"spectator/lang/nodejs/usage/","title":"Usage","text":""},{"location":"spectator/lang/nodejs/usage/#project","title":"Project","text":""},{"location":"spectator/lang/nodejs/usage/#spectator-js","title":"spectator-js","text":"
    • Source
    • NPM
    • Product Lifecycle: GA
    • Module Name: nflx-spectator

    This module can be used to instrument an application using counters, distribution summaries, gauges, long task timers, timers, and more complex meter types (like Bucket or Percentile Timers) using a dimensional data model.

    The generated metrics are periodically sent to an Atlas Aggregator.

    "},{"location":"spectator/lang/nodejs/usage/#spectator-js-nodejsmetrics","title":"spectator-js-nodejsmetrics","text":"
    • Source
    • NPM
    • Product Lifecycle: GA
    • Module Name: nflx-spectator-nodejsmetrics

    Generate Node.js runtime metrics using the spectator-js Node module.

    "},{"location":"spectator/lang/nodejs/usage/#install-libraries","title":"Install Libraries","text":"

    Add the following dependencies to package.json:

    {\n  \"dependencies\": {\n    \"nflx-spectator\": \"*\",\n    \"nflx-spectator-nodejsmetrics\": \"*\"\n  }\n}\n
    "},{"location":"spectator/lang/nodejs/usage/#instrumenting-code","title":"Instrumenting Code","text":"
    'use strict';\n\nconst spectator = require('nflx-spectator');\n\n// Netflix applications can use the nflx-spectator-config node module available\n// internally through artifactory to generate the config required by nflx-spectator\nfunction getConfig() {\n  return {\n    commonTags: {'nf.node': 'i-1234'},\n    uri: 'http://atlas.example.org/v1/publish',\n    timeout: 1000 // milliseconds \n  }\n}\n\nclass Response {\n  constructor(status, size) {\n    this.status = status;\n    this.size = size;\n  }\n}\n\nclass Server {\n  constructor(registry) {\n    this.registry = registry;\n    // create a base Id, to which we'll add some dynamic tags later\n    this.requestCountId = registry.createId('server.requestCount', {version: 'v1'});\n    this.requestLatency = registry.timer('server.requestLatency');\n    this.responseSize = registry.distributionSummary('server.responseSizes');\n  }\n\n  handle(request) {\n    const start = this.registry.hrtime();\n\n    // do some work based on request and obtain a response\n    const res = new Response(200, 64);\n\n    // update the counter id with dimensions based on the request. 
The\n    // counter will then be looked up in the registry which should be \n    // fairly cheap, such as a lookup of an id object in a map\n    // However, it is more expensive than having a local variable set\n    // to the counter\n    const counterId = this.requestCountId.withTags({country: request.country, \n        status: res.status});\n    this.registry.counter(counterId).increment();\n    this.requestLatency.record(this.registry.hrtime(start));\n    this.responseSize.record(res.size);\n    return res;\n  }\n}\n\nconst config = getConfig();\nconst registry = new spectator.Registry(config);\n\nclass Request {\n  constructor(country) {\n    this.country = country;\n  }\n}\n\n// somehow get a request from the user...\nfunction getNextRequest() {\n  return new Request('AR');\n}\n\nfunction handleTermination() {\n  registry.stop();\n}\n\nprocess.on('SIGINT', handleTermination);\nprocess.on('SIGTERM', handleTermination);\n\nregistry.start();\n\nconst server = new Server(registry);\n\nfor (let i = 0; i < 3; ++i) {\n  const req = getNextRequest();\n  server.handle(req)\n}\n\nregistry.stop();\n
    "},{"location":"spectator/lang/nodejs/usage/#enable-runtime-metrics","title":"Enable Runtime Metrics","text":"
    'use strict';\n\nfunction getConfig() {\n}\n\nconst spectator = require('nflx-spectator');\nconst NodeMetrics = require('nflx-spectator-nodejsmetrics');\n\nconst config = {\n  commonTags: {'nf.node': 'i-1234'},\n  uri: 'http://atlas.example.org/v1/publish'\n};\nconst registry = new spectator.Registry(config);\nregistry.start();\n\nconst metrics = new NodeMetrics(registry);\nmetrics.start(); // start collecting nodejs metrics\n\n// ...\n\nmetrics.stop();\nregistry.stop();\n
    "},{"location":"spectator/lang/nodejs/usage/#netflix-integration","title":"Netflix Integration","text":"

    Create a Netflix Spectator Config to be used by spectator-js.

    Only applications should depend on the @netflix-internal/spectator-conf package. Libraries should get the Registry passed by the application, and therefore should only need to depend on spectator-js.

    Add the following dependencies to package.json:

    {\n  \"dependencies\": {\n    \"nflx-spectator\": \"*\",\n    \"nflx-spectator-nodejsmetrics\": \"*\",\n    \"@netflix-internal/spectator-conf\": \"*\"\n  }\n}\n

    This configuration also brings in spectator-js-nodejsmetrics to provide Node.js runtime metrics.

    You can override the logger used by the Spectator registry by setting the logger property. The specified logger should provide debug, info, and error methods. By default, spectator-js logs to stdout.

    const spectator = require('nflx-spectator');\nconst NodeMetrics = require('nflx-spectator-nodejsmetrics');\nconst getSpectatorConfig = require('@netflix-internal/spectator-conf');\nconst logger = require('pino')();\n\n//...\n\nconst registry = new spectator.Registry(getSpectatorConfig());\nregistry.logger = logger;\nregistry.start();\n\nconst metrics = new NodeMetrics(registry);\nmetrics.start();\n\nfunction handleTermination() {\n  metrics.stop();\n  registry.stop();\n}\n\nprocess.on('SIGINT', handleTermination);\nprocess.on('SIGTERM', handleTermination);\n\n//... your app\n\nhandleTermination();\n
    "},{"location":"spectator/lang/nodejs/ext/nodejs-cpu/","title":"CPU","text":"

    Node.js runtime CPU metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-cpu/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/nodejs/ext/nodejs-cpu/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-cpu/#nodejscpuusage","title":"nodejs.cpuUsage","text":"

    Percentage of CPU time the Node.js process is consuming, from 0..100.

    The usage is divided into the following categories:

    • system: CPU time spent running the kernel.
    • user: CPU time spent running user space (non-kernel) processes.

    Unit: percent

    Dimensions:

    • id: The category of CPU usage.

    Example:

    {\n  \"tags\": {\n    \"id\": \"system\",\n    \"name\": \"nodejs.cpuUsage\",\n    /// nf.* tags\n    \"nodejs.version\": \"v6.5.0\"\n  },\n  \"start\": 1485813720000,\n  \"value\": 0.8954088417692685\n},\n{\n  \"tags\": {\n    \"id\": \"user\",\n    \"name\": \"nodejs.cpuUsage\",\n    /// nf.* tags\n    \"nodejs.version\": \"v6.5.0\"\n  },\n  \"start\": 1485813720000,\n  \"value\": 4.659007745141895\n}\n
    "},{"location":"spectator/lang/nodejs/ext/nodejs-eventloop/","title":"Event Loop","text":"

    Node.js runtime event loop metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-eventloop/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/nodejs/ext/nodejs-eventloop/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-eventloop/#nodejseventloop","title":"nodejs.eventLoop","text":"

    The time it takes for the event loop to complete. This is sampled twice per second.

    Unit: seconds

    "},{"location":"spectator/lang/nodejs/ext/nodejs-eventloop/#nodejseventlooplag","title":"nodejs.eventLoopLag","text":"

    The time that the event loop is running behind, as measured by attempting to execute a timer once per second.

    Unit: seconds

    "},{"location":"spectator/lang/nodejs/ext/nodejs-filedescriptor/","title":"File Descriptor","text":"

    Node.js runtime file descriptor metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-filedescriptor/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/nodejs/ext/nodejs-filedescriptor/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-filedescriptor/#openfiledescriptorscount","title":"openFileDescriptorsCount","text":"

    Number of file descriptors currently open.

    Unit: file descriptors

    "},{"location":"spectator/lang/nodejs/ext/nodejs-filedescriptor/#maxfiledescriptorscount","title":"maxFileDescriptorsCount","text":"

    The maximum number of file descriptors that can be open at the same time.

    Unit: file descriptors

    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/","title":"Garbage Collection","text":"

    Node.js runtime garbage collection metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#nodejsgcallocationrate","title":"nodejs.gc.allocationRate","text":"

    The rate at which the app is allocating memory.

    Unit: bytes/second

    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#nodejsgclivedatasize","title":"nodejs.gc.liveDataSize","text":"

    The size of the old_space after a major GC event.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#nodejsgcmaxdatasize","title":"nodejs.gc.maxDataSize","text":"

    The maximum amount of memory the nodejs process is allowed to use. This is primarily used for gaining perspective on the liveDataSize.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#nodejsgcpause","title":"nodejs.gc.pause","text":"

    The time it takes to complete different GC events.

    Event categories:

    • scavenge: The most common garbage collection method. Node will typically trigger one of these every time the VM is idle.
    • markSweepCompact: The heaviest type of garbage collection V8 may do. If you see many of these happening you will need to either keep fewer objects around in your process or increase V8's heap limit.
    • incrementalMarking: A phased garbage collection that interleaves collection with application logic to reduce the amount of time the application is paused.
    • processWeakCallbacks: After a garbage collection occurs, V8 will call any weak reference callbacks registered for objects that have been freed. This measurement is from the start of the first weak callback to the end of the last for a given garbage collection.

    Unit: seconds

    Dimensions:

    • id: The GC event category.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#nodejsgcpromotionrate","title":"nodejs.gc.promotionRate","text":"

    The rate at which data is being moved from new_space to old_space.

    Unit: bytes/second

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/","title":"Heap","text":"

    Node.js runtime heap metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#metrics","title":"Metrics","text":"

    Data is gathered from the v8.getHeapStatistics method.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejsdoeszapgarbage","title":"nodejs.doesZapGarbage","text":"

    Whether or not the --zap_code_space option is enabled.

    This makes V8 overwrite heap garbage with a bit pattern. The RSS footprint (resident memory set) gets bigger because it continuously touches all heap pages and that makes them less likely to get swapped out by the operating system.

    Unit: boolean

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejsheapsizelimit","title":"nodejs.heapSizeLimit","text":"

    The absolute limit the heap cannot exceed (default limit or --max_old_space_size).

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejsmallocedmemory","title":"nodejs.mallocedMemory","text":"

    Current amount of memory, obtained via malloc.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejspeakmallocedmemory","title":"nodejs.peakMallocedMemory","text":"

    Peak amount of memory, obtained via malloc.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejstotalavailablesize","title":"nodejs.totalAvailableSize","text":"

    Available heap size.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejstotalheapsize","title":"nodejs.totalHeapSize","text":"

    Memory V8 has allocated for the heap. This can grow if usedHeap needs more.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejstotalheapsizeexecutable","title":"nodejs.totalHeapSizeExecutable","text":"

    Memory for compiled bytecode and JITed code.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejstotalphysicalsize","title":"nodejs.totalPhysicalSize","text":"

    Committed size.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejsusedheapsize","title":"nodejs.usedHeapSize","text":"

    Memory used by application data.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/","title":"Heap Space","text":"

    Node.js runtime heap space metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#metrics","title":"Metrics","text":"

    Data is gathered from the v8.getHeapSpaceStatistics method, for each space listed.

    Space categories:

    • new_space: Where new allocations happen; it is fast to allocate and collect garbage here. Objects living in the New Space are called the Young Generation.
    • old_space: Objects that survived the New Space collector are promoted here; they are called the Old Generation. Allocation in the Old Space is fast, but collection is expensive so it is less frequently performed.
    • code_space: Contains executable code and therefore is marked executable.
    • map_space: Contains map objects only.
    • large_object_space: Contains promoted large objects which exceed the size limits of other spaces. Each object gets its own mmap region of memory and these objects are never moved by GC.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#nodejsspacesize","title":"nodejs.spaceSize","text":"

    The allocated size of the space.

    Unit: bytes

    Dimensions:

    • id: Space category.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#nodejsspaceusedsize","title":"nodejs.spaceUsedSize","text":"

    The used size of the space.

    Unit: bytes

    Dimensions:

    • id: Space category.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#nodejsspaceavailablesize","title":"nodejs.spaceAvailableSize","text":"

    The available size of the space.

    Unit: bytes

    Dimensions:

    • id: Space category.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#nodejsphysicalspacesize","title":"nodejs.physicalSpaceSize","text":"

    The physical size of the space.

    Unit: bytes

    Dimensions:

    • id: Space category.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/","title":"Memory","text":"

    Node.js runtime memory metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#nodejsrss","title":"nodejs.rss","text":"

    Resident Set Size, which is the total memory allocated for the process execution. This includes the Code Segment, Stack (local variables and pointers) and Heap (objects and closures).

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#nodejsheaptotal","title":"nodejs.heapTotal","text":"

    Total size of the allocated heap.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#nodejsheapused","title":"nodejs.heapUsed","text":"

    Memory used during the execution of our process.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#nodejsexternal","title":"nodejs.external","text":"

    Memory usage of C++ objects bound to JavaScript objects managed by V8.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/meters/counter/","title":"Counters","text":"

    TBD

    "},{"location":"spectator/lang/nodejs/meters/dist-summary/","title":"Distribution Summaries","text":"

    TBD

    "},{"location":"spectator/lang/nodejs/meters/gauge/","title":"Gauges","text":"

    TBD

    "},{"location":"spectator/lang/nodejs/meters/percentile-timer/","title":"Percentile Timers","text":"

    TBD

    "},{"location":"spectator/lang/nodejs/meters/timer/","title":"Timers","text":"

    TBD

    "},{"location":"spectator/lang/py/migrations/","title":"Migrations","text":""},{"location":"spectator/lang/py/migrations/#migrating-from-01x-to-02x","title":"Migrating from 0.1.X to 0.2.X","text":"
    • This library no longer publishes directly to the Atlas backends. It now publishes to the [SpectatorD] sidecar which is bundled with all standard AMIs and containers. If you must have the previous direct publishing behavior, because SpectatorD is not yet available on the platform where your code runs, then you can pin to version 0.1.18.
    • The internal Netflix configuration companion library is no longer required and this dependency may be dropped from your project.
    • The API surface area remains unchanged to avoid breaking library consumers, and standard uses of GlobalRegistry helper methods for publishing metrics continue to work as expected. Several helper methods on meter classes are now no-ops, always returning values such as 0 or nan. If you want to write tests to validate metrics publication, take a look at the tests in this library for a few examples of how that can be done. The core idea is to capture the lines which will be written out to SpectatorD.
    • Replace uses of PercentileDistributionSummary with direct use of the Registry pct_distribution_summary method.

      # before\nfrom spectator import GlobalRegistry\nfrom spectator.histogram import PercentileDistributionSummary\n\nd = PercentileDistributionSummary(GlobalRegistry, \"server.requestSize\")\nd.record(10)\n
      # after\nfrom spectator import GlobalRegistry\n\nGlobalRegistry.pct_distribution_summary(\"server.requestSize\").record(10)\n
    • Replace uses of PercentileTimer with direct use of the Registry pct_timer method.

      # before\nfrom spectator import GlobalRegistry\nfrom spectator.histogram import PercentileTimer\n\nt = PercentileTimer(GlobalRegistry, \"server.requestSize\")\nt.record(0.01)\n
      # after\nfrom spectator import GlobalRegistry\n\nGlobalRegistry.pct_timer(\"server.requestSize\").record(0.01)\n
    • Implemented new meter types supported by [SpectatorD]: age_gauge, max_gauge and monotonic_counter. See the SpectatorD documentation or the class docstrings for more details.

    "},{"location":"spectator/lang/py/usage/","title":"spectator-py Usage","text":"

    Python thin-client metrics library for use with Atlas and SpectatorD.

    Supports Python >= 3.5. This version is chosen as the baseline, because it is the oldest system Python available in our operating environments.

    "},{"location":"spectator/lang/py/usage/#installing","title":"Installing","text":"

    Install this library for your project as follows:

    pip3 install netflix-spectator-py\n

    Publishing metrics requires a SpectatorD process running on your instance.

    "},{"location":"spectator/lang/py/usage/#importing","title":"Importing","text":""},{"location":"spectator/lang/py/usage/#standard-usage","title":"Standard Usage","text":"

    Importing the GlobalRegistry instantiates a Registry with a default configuration that applies process-specific common tags based on environment variables and opens a socket to the SpectatorD agent. The remainder of the instance-specific common tags are provided by SpectatorD.

    from spectator import GlobalRegistry\n

    Once the GlobalRegistry is imported, it is used to create and manage Meters.

    "},{"location":"spectator/lang/py/usage/#logging","title":"Logging","text":"

    This package provides the following loggers:

    • spectator.MeterId
    • spectator.SidecarWriter

    The MeterId logger is used to report invalid meters which have not-a-str tag keys or values.

    When troubleshooting metrics collection and reporting, you should set the SidecarWriter logging to the DEBUG level, before the first metric is recorded. For example:

    import logging\n\n# record the human-readable time, name of the logger, logging level, thread id and message\nlogging.basicConfig(\n    level=logging.DEBUG,\n    format='%(asctime)s - %(name)s - %(levelname)s - %(thread)d - %(message)s'\n)\n\nlogging.getLogger('spectator.SidecarWriter').setLevel(logging.DEBUG)\n

    There is approximately a 10% performance penalty in UDP write performance when debug logging is enabled. It may be more, depending on the exact logging configuration (i.e. flushing to slow disk).

    "},{"location":"spectator/lang/py/usage/#working-with-ids","title":"Working with IDs","text":"

    The IDs used for identifying a meter in the GlobalRegistry consist of a name and a set of tags. IDs will be consumed by users many times after the data has been reported, so they should be chosen thoughtfully, while considering how they will be used. See the naming conventions page for general guidelines.

    IDs are immutable, so they can be freely passed around and used in a concurrent context. Tags can be added to an ID when it is created, to track the dimensionality of the metric. All tag keys and values must be strings. For example, if you want to keep track of the number of successful requests, you must cast integers to strings.

    from spectator import GlobalRegistry\n\nrequests_id = GlobalRegistry.counter(\"server.numRequests\", {\"statusCode\": str(200)})\nrequests_id.increment()\n
    "},{"location":"spectator/lang/py/usage/#meter-types","title":"Meter Types","text":"
    • Age Gauge
    • Counter
    • Distribution Summary
    • Gauge
    • Max Gauge
    • Monotonic Counter
    • Percentile Distribution Summary
    • Percentile Timer
    • Timer
    "},{"location":"spectator/lang/py/usage/#asyncio-support","title":"asyncio Support","text":"

    The GlobalRegistry provides a UdpWriter implementation of the SidecarWriter by default. UDP is a non-blocking, unordered and connectionless protocol, which is ideal for communicating with a local SpectatorD process in a variety of circumstances. The UdpWriter should be used in asyncio applications.

    The PrintWriter implementation, which can be used to communicate with the SpectatorD Unix domain socket, does not offer asyncio support at this time.

    "},{"location":"spectator/lang/py/usage/#ipv6-support","title":"IPv6 Support","text":"

    By default, SpectatorD will listen on IPv6 UDP *:1234, without setting the v6_only(true) flag. On dual-stacked systems, this means that it will receive packets from both IPv4 and IPv6, and the IPv4 addresses will show up on the server as IPv4-mapped IPv6 addresses.

    By default, the GlobalRegistry will write UDP packets to 127.0.0.1:1234, which will allow for communication with SpectatorD on dual-stacked systems.

    On IPv6-only systems, it may be necessary to change the default configuration using one of the following methods:

    • Configure the following environment variable, which will override the default configuration of the GlobalRegistry:

      export SPECTATOR_OUTPUT_LOCATION=\"udp://[::1]:1234\"

    • Configure a custom Registry, instead of using the GlobalRegistry:

      from spectator import Registry from spectator.sidecarconfig import SidecarConfig

      r = Registry(config=SidecarConfig({\"sidecar.output-location\": \"udp://[::1]:1234\"})) r.counter(\"test\").increment()

    "},{"location":"spectator/lang/py/usage/#writing-tests","title":"Writing Tests","text":"

    To write tests against this library, instantiate a test instance of the Registry and configure it to use the MemoryWriter, which stores all updates in a List. Use the writer() method on the Registry to access the writer, then inspect the last_line() or get() all messages to verify your metrics updates.

    import unittest\n\nfrom spectator import Registry\nfrom spectator.sidecarconfig import SidecarConfig\n\nclass MetricsTest(unittest.TestCase):\n\n    def test_counter(self):\n        r = Registry(config=SidecarConfig({\"sidecar.output-location\": \"memory\"}))\n\n        c = r.counter(\"test\")\n        self.assertTrue(r.writer().is_empty())\n\n        c.increment()\n        self.assertEqual(\"c:test:1\", r.writer().last_line())\n

    If you need to override the default output location (udp) of the GlobalRegistry, then you can set a SPECTATOR_OUTPUT_LOCATION environment variable to one of the following values supported by the SidecarConfig class:

    • none - Disable output.
    • memory - Write to memory.
    • stdout - Write to standard out for the process.
    • stderr - Write to standard error for the process.
    • file://$path_to_file - Write to a file (e.g. file:///tmp/foo/bar).
    • udp://$host:$port - Write to a UDP socket.

    If you want to disable metrics publishing from the GlobalRegistry, then you can set:

    export SPECTATOR_OUTPUT_LOCATION=none\n

    If you want to validate the metrics that will be published through the GlobalRegistry in tests, then you can set:

    export SPECTATOR_OUTPUT_LOCATION=memory\n

    The MemoryWriter subclass offers a few methods to inspect the values that it captures:

    • clear() - Delete the contents of the internal list.
    • get() - Return the internal list.
    • is_empty() - Is the internal list empty?
    • last_line() - Return the last element of the internal list.

    Lastly, a SpectatorD line protocol parser is available, which is intended to be used for validating the results captured by a MemoryWriter. It may be used as follows:

    import unittest\n\nfrom spectator.counter import Counter\nfrom spectator.protocolparser import parse_protocol_line\n\n\nclass ProtocolParserTest(unittest.TestCase):\n\n    def test_parse_counter_with_multiple_tags(self):\n        meter_class, meter_id, value = parse_protocol_line(\"c:test,foo=bar,baz=quux:1\")\n        self.assertEqual(Counter, meter_class)\n        self.assertEqual(\"test\", meter_id.name)\n        self.assertEqual({\"foo\": \"bar\", \"baz\": \"quux\"}, meter_id.tags())\n        self.assertEqual(\"1\", value)\n
    "},{"location":"spectator/lang/py/meters/age-gauge/","title":"Age Gauges","text":"

    The value is the time in seconds since the epoch at which an event has successfully occurred, or 0 to use the current time in epoch seconds. After an Age Gauge has been set, it will continue reporting the number of seconds since the last time recorded, for as long as the SpectatorD process runs. The purpose of this metric type is to enable users to more easily implement the Time Since Last Success alerting pattern.

    To set a specific time as the last success:

    from spectator import GlobalRegistry\n\nGlobalRegistry.age_gauge(\"time.sinceLastSuccess\").set(1611081000)\n

    To set now() as the last success:

    from spectator import GlobalRegistry\n\nGlobalRegistry.age_gauge(\"time.sinceLastSuccess\").set(0)\n

    By default, a maximum of 1000 Age Gauges are allowed per spectatord process, because there is no mechanism for cleaning them up. This value may be tuned with the --age_gauge_limit flag on the spectatord binary.

    Since Age Gauges are long-lived entities that reside in the memory of the SpectatorD process, if you need to delete and re-create them for any reason, then you can use the SpectatorD admin server to accomplish this task. You can delete all Age Gauges or a single Age Gauge.

    Example:

    curl -X DELETE \\\nhttp://localhost:1234/metrics/A\n
    curl -X DELETE \\\nhttp://localhost:1234/metrics/A/fooIsTheName,some.tag=val1,some.otherTag=val2\n
    "},{"location":"spectator/lang/py/meters/counter/","title":"Counters","text":"

    A Counter is used to measure the rate at which an event is occurring. Considering an API endpoint, a Counter could be used to measure the rate at which it is being accessed.

    Counters are reported to the backend as a rate-per-second. In Atlas, the :per-step operator can be used to convert them back into a value-per-step on a graph.

    Call increment() when an event occurs:

    from spectator import GlobalRegistry\n\nGlobalRegistry.counter(\"server.numRequests\").increment()\n

    You can also pass a value to increment(). This is useful when a collection of events happens together:

    from spectator import GlobalRegistry\n\nGlobalRegistry.counter(\"queue.itemsAdded\").increment(10)\n
    "},{"location":"spectator/lang/py/meters/dist-summary/","title":"Distribution Summaries","text":"

    A Distribution Summary is used to track the distribution of events. It is similar to a Timer, but more general, in that the size does not have to be a period of time. For example, a Distribution Summary could be used to measure the payload sizes of requests hitting a server.

    Always use base units when recording data, to ensure that the tick labels presented on Atlas graphs are readable. If you are measuring payload size, then use bytes, not kilobytes (or some other unit). This means that a 4K tick label will represent 4 kilobytes, rather than 4 kilo-kilobytes.

    Call record() with a value:

    from spectator import GlobalRegistry\n\nGlobalRegistry.distribution_summary(\"server.requestSize\").record(10)\n
    "},{"location":"spectator/lang/py/meters/gauge/","title":"Gauges","text":"

    A gauge is a value that is sampled at some point in time. Typical examples for gauges would be the size of a queue or number of threads in a running state. Since gauges are not updated inline when a state change occurs, there is no information about what might have occurred between samples.

    Consider monitoring the behavior of a queue of tasks. If the data is being collected once a minute, then a gauge for the size will show the size when it was sampled. The size may have been much higher or lower at some point during the interval, but that is not known.

    Call set() with a value:

    from spectator import GlobalRegistry\n\nGlobalRegistry.gauge(\"server.queueSize\").set(10)\n

    Gauges will report the last set value for 15 minutes. This is done so that updates to the values do not need to be collected on a tight 1-minute schedule to ensure that Atlas shows unbroken lines in graphs. A custom TTL may be configured for gauges. SpectatorD enforces a minimum TTL of 5 seconds.

    from spectator import GlobalRegistry\n\nGlobalRegistry.gauge(\"server.queueSize\", ttl_seconds=120).set(10)\n
    "},{"location":"spectator/lang/py/meters/max-gauge/","title":"Max Gauges","text":"

    The value is a number that is sampled at a point in time, but it is reported as a maximum Gauge value to the backend. This ensures that only the maximum value observed during a reporting interval is sent to the backend, thus over-riding the last-write-wins semantics of standard Gauges. Unlike standard Gauges, Max Gauges do not continue to report to the backend, and there is no TTL.

    Call set() with a value:

    from spectator import GlobalRegistry\n\nGlobalRegistry.max_gauge(\"server.queueSize\").set(10)\n
    "},{"location":"spectator/lang/py/meters/mono-counter/","title":"Monotonic Counters","text":"

    A Monotonic Counter is used to measure the rate at which an event is occurring, when the source data is a monotonically increasing number. A minimum of two samples must be sent, in order to calculate a delta value and report it to the backend as a rate-per-second. A variety of networking metrics may be reported monotonically, and this metric type provides a convenient means of recording these values, at the expense of a slower time-to-first metric.

    Call set() when an event occurs:

    from spectator import GlobalRegistry\n\nGlobalRegistry.monotonic_counter(\"iface.bytes\").set(10)\n
    "},{"location":"spectator/lang/py/meters/pct-dist-summary/","title":"Percentile Distribution Summaries","text":"

    The value tracks the distribution of events, with percentile estimates. It is similar to a Percentile Timer, but more general, because the size does not have to be a period of time.

    For example, it can be used to measure the payload sizes of requests hitting a server or the number of records returned from a query.

    In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Distribution Summary. Be diligent about any additional dimensions added to Percentile Distribution Summaries and ensure that they have a small bounded cardinality.

    Call record() with a value:

    from spectator import GlobalRegistry\n\nGlobalRegistry.pct_distribution_summary(\"server.requestSize\").record(10)\n
    "},{"location":"spectator/lang/py/meters/pct-timer/","title":"Percentile Timers","text":"

    The value is the number of seconds that have elapsed for an event, with percentile estimates.

    This metric type will track the data distribution by maintaining a set of Counters. The distribution can then be used on the server side to estimate percentiles, while still allowing for arbitrary slicing and dicing based on dimensions.

    In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Timer. Be diligent about any additional dimensions added to Percentile Timers and ensure that they have a small bounded cardinality.

    Call record() with a value:

    from spectator import GlobalRegistry\n\nGlobalRegistry.pct_timer(\"server.requestLatency\").record(0.01)\n

    A stopwatch() method is available which may be used as a Context Manager to automatically record the number of seconds that have elapsed while executing a block of code:

    import time\nfrom spectator import GlobalRegistry\n\nt = GlobalRegistry.pct_timer(\"thread.sleep\")\n\nwith t.stopwatch():\n    time.sleep(5)\n
    "},{"location":"spectator/lang/py/meters/timer/","title":"Timers","text":"

    A Timer is used to measure how long (in seconds) some event is taking.

    Call record() with a value:

    from spectator import GlobalRegistry\n\nGlobalRegistry.timer(\"server.requestLatency\").record(0.01)\n

    A stopwatch() method is available which may be used as a Context Manager to automatically record the number of seconds that have elapsed while executing a block of code:

    import time\nfrom spectator import GlobalRegistry\n\nt = GlobalRegistry.timer(\"thread.sleep\")\n\nwith t.stopwatch():\n    time.sleep(5)\n

    Internally, Timers will keep track of the following statistics as they are used:

    • count
    • totalTime
    • totalOfSquares
    • max
    "},{"location":"spectator/lang/rb/deprecated/","title":"spectator-rb Usage","text":"

    This client library is deprecated, does not support spectatord, and it is no longer maintained.

    You should move to a Paved Path language as soon as possible.

    "},{"location":"spectator/patterns/cardinality-limiter/","title":"Cardinality Limiter","text":"

    Helper functions to help manage the cardinality of tag values. This should be used anywhere you cannot guarantee that the tag values being used are strictly bounded. There is support for two different modes: (1) selecting the first N values that are seen, or (2) selecting the most frequent N values that are seen.

    Example usage:

    class WebServer {\n\n  // Limiter instance, should be shared for all uses of that tag value\n  private final Function&lt;String, String&gt; pathLimiter =\n    CardinalityLimiters.mostFrequent(10);\n\n  private final Registry registry;\n  private final Id baseId;\n\n  public WebServer(Registry registry) {\n    this.registry = registry;\n    this.baseId = registry.createId(\"server.requestCount\");\n  }\n\n  public Response handleRequest(Request req) {\n    Response res = doSomething(req);\n\n    // Update metrics, use limiter to restrict the set of values for the\n    // path and avoid an explosion\n    String pathValue = pathLimiter.apply(req.getPath());\n    Id id = baseId\n      .withTag(\"path\", pathValue)\n      .withTag(\"status\", res.getStatus());\n    registry.counter(id).increment();\n  }\n}\n
    "},{"location":"spectator/patterns/gauge-poller/","title":"Gauge Poller","text":"

    Helper for polling gauges in a background thread. A shared executor is used with a single thread. If registered gauge methods are cheap as they should be, then this should be plenty of capacity to process everything regularly. If not, then this will help limit the damage to a single core and avoid causing problems for the application.

    "},{"location":"spectator/patterns/interval-counter/","title":"Interval Counter","text":"

    A counter that also keeps track of the time since last update.

    "},{"location":"spectator/patterns/long-task-timer/","title":"Long Task Timer","text":"

    Timer intended to track a small number of long running tasks. Example would be something like a batch hadoop job. Though \"long running\" is a bit subjective, the assumption is that anything over a minute is long running.

    A regular Timer just records the duration and has no information until the task is complete.

    As an example, consider a chart showing request latency to a typical web server. The expectation is many short requests, so the timer will be getting updated many times per second.

    Now consider a background process to refresh metadata from a data store. For example, Edda caches AWS resources such as instances, volumes, auto-scaling groups etc. Normally, all data can be refreshed in a few minutes. If the AWS services are having problems, it can take much longer. A long duration timer can be used to track the overall time for refreshing the metadata.

    The charts below show max latency for the refresh using a regular timer and a long task timer. Regular timer, note that the y-axis is using a logarithmic scale:

    Long Task Timer:

    "},{"location":"spectator/patterns/percentile-timer/","title":"Percentile Timers","text":"

    A Timer that buckets the counts, to allow for estimating percentiles. This Timer type will track the data distribution for the timer by maintaining a set of Counters. The distribution can then be used on the server side to estimate percentiles, while still allowing for arbitrary slicing and dicing based on dimensions.

    Warning

    Please be selective about what you measure as there is significant overhead on both the client and storage side. Usually only one or two key performance indicators (KPIs) per application. Limit the tag cardinality as much as possible. For example, only include an endpoint tag, not a user agent or response code. Use one of the other meter types whenever possible.

    In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Timer. Be diligent about any additional dimensions added to Percentile Timers and ensure that they have a small bounded cardinality. In addition, it is highly recommended to set a range, whenever possible, to restrict the worst case overhead.

    When using the builder, the range will default from 10 ms to 1 minute. Based on data at Netflix, this is the most common range for request latencies and restricting to this window reduces the worst case multiple from 276X to 58X.

    "},{"location":"spectator/patterns/percentile-timer/#range-recommendations","title":"Range Recommendations","text":"

    The range should be the SLA boundary or failure point for the activity. Explicitly setting the range allows us to optimize for the important range of values and reduce the overhead associated with tracking the data distribution.

    For example, suppose you are making a client call and timeout after 10 seconds. Setting the range to 10 seconds will restrict the possible set of buckets used to those approaching the boundary. So we can still detect if it is nearing failure, but percentiles that are further away from the range may be inflated compared to the actual value.

    "},{"location":"spectator/patterns/percentile-timer/#bucket-distribution","title":"Bucket Distribution","text":"

    The set of buckets is generated by using powers of 4 and incrementing by one-third of the previous power of 4 in between as long as the value is less than the next power of 4 minus the delta.

    Base: 1, 2, 3\n\n4 (4^1), delta = 1\n    5, 6, 7, ..., 14,\n\n16 (4^2), delta = 5\n   21, 26, 31, ..., 56,\n\n64 (4^3), delta = 21\n...\n
    "},{"location":"spectator/patterns/polled-meter/","title":"Polled Meter","text":"

    Helper for configuring a meter that will receive a value by regularly polling the source in the background.

    Example usage:

    Registry registry = ...\nAtomicLong connections = PolledMeter.using(registry)\n  .withName(\"server.currentConnections\")\n  .monitorValue(new AtomicLong());\n\n// When a connection is added\nconnections.incrementAndGet();\n\n// When a connection is removed\nconnections.decrementAndGet();\n

    Polling frequency will depend on the underlying Registry implementation, but users should assume it will be frequently checked and that the provided function is cheap. Users should keep in mind that polling will not capture all activity, just sample it at some frequency. For example, if monitoring a queue, then a meter will only tell you the last sampled size when the value is reported. If more details are needed, then use an alternative type and ensure that all changes are reported when they occur.

    For example, consider tracking the number of currently established connections to a server. Using a polled meter will show the last sampled number when reported. An alternative would be to report the number of connections to a Distribution Summary every time a connection is added or removed. The distribution summary would provide more accurate tracking such as max and average number of connections across an interval of time. The polled meter would not provide that level of detail.

    If multiple values are monitored with the same id, then the values will be aggregated and the sum will be reported. For example, registering multiple meters for active threads in a thread pool with the same id would produce a value that is the overall number of active threads. For other behaviors, manage it on the user side and avoid multiple registrations.

    "},{"location":"spectator/specs/ipc/","title":"IPC","text":"

    This is a description of the Common IPC Metrics that can be published by various IPC libraries, with the goal of allowing consolidated monitoring and analysis across differing IPC implementations.

    "},{"location":"spectator/specs/ipc/#dimensions-common-to-all-metrics","title":"Dimensions Common to All Metrics","text":"

    Not all dimensions are applicable for all of the metrics, and later in the sections for each specific metric, the applicable dimensions are specified.

    Also note that not all dimensions have been implemented or are applicable for all implementations.

    • ipc.protocol: A short name of the network protocol in use, eg. grpc, http_1, http_2, udp, etc ...
    • ipc.vip: The Eureka VIP address used to find the server.
    • ipc.result: Was this considered by the implementation to be successful. Allowed Values = [success, failure].
    • ipc.status: One of a predefined list of status values indicating the general result, eg. success, bad_request, timeout, etc... See the ipc.status values section below.
    • ipc.status.detail: For cases where the ipc.status needs to be further subdivided, this tag can hold an additional more specific detail, likely ipc-implementation specific. eg status of connection_error and detail of no_servers / connect_timeout / ssl_handshake_failure.
    • ipc.failure.injected: Indicates that an artificial failure was injected into the request processing for testing purposes. The outcome of that failure will be reflected in the other error tags. Allowed Values = [true]
    • ipc.endpoint: The name of the endpoint/function/feature the message was sent to within the server (eg. the URL path prefix for a java servlet, or the grpc endpoint name).
    • ipc.attempt: Which attempt at sending this message is this. Allowed Values = [initial, second, third_up] (initial is the first attempt, second is 2nd attempt but first retry, third_up means third or higher attempt).
    • ipc.attempt.final: Indicates if this request was the final attempt of potentially multiple retry attempts. Allowed Values = [true, false].
    • ipc.server.app: The nf.app of the server the message is being sent to.
    • ipc.server.cluster: The nf.cluster of the server the message is being sent to.
    • ipc.server.asg: The nf.asg of the server the message is being sent to.
    • ipc.client.app: The nf.app of the server the message is being sent from.
    • ipc.client.cluster: The nf.cluster of the server the message is being sent from.
    • ipc.client.asg: The nf.asg of the server the message is being sent from.
    • owner: The library/impl publishing the metrics, eg. evcache, zuul, grpc, nodequark, platform_1_ipc, geoclient, etc ...
    • id: Conceptual name of service. Equivalent of RestClient name in NIWS.
    "},{"location":"spectator/specs/ipc/#allowed-values-for-ipcstatus-dimension","title":"Allowed Values for ipc.status Dimension","text":"
    • success: The request was successfully processed and responded to, as far as the client or server know.
    • bad_request: There was a problem with the client's request causing it not to be fulfilled.
    • unexpected_error: The client or server encountered an unexpected error processing the request.
    • connection_error: There was an error with the underlying network connection either during establishment or while in use.
    • unavailable: There were no servers available to process the request.
    • throttled: The request was rejected due to the client or server considering the server to be above capacity.
    • timeout: The request could not or would not be complete within the configured threshold (either on client or server).
    • cancelled: The client cancelled the request before it was completed.
    • access_denied: The request was denied access for authentication or authorization reasons.
    "},{"location":"spectator/specs/ipc/#server-metrics","title":"Server Metrics","text":""},{"location":"spectator/specs/ipc/#ipcservercall","title":"ipc.server.call","text":"

    This is a percentile timer that is recorded for each inbound message to a server.

    Unit: seconds

    Dimensions:

    • ipc.protocol
    • ipc.result
    • ipc.vip
    • ipc.endpoint
    • ipc.status
    • ipc.status.detail
    • ipc.failure.injected
    • ipc.attempt
    • ipc.client.app
    • ipc.client.cluster
    • ipc.client.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcservercallsizeinbound","title":"ipc.server.call.size.inbound","text":"

    This is a distribution summary of the size in bytes of inbound messages received by a server.

    Unit: bytes

    Dimensions:

    • ipc.protocol
    • ipc.vip
    • ipc.endpoint
    • ipc.result
    • ipc.status
    • ipc.status.detail
    • ipc.client.app
    • ipc.client.cluster
    • ipc.client.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcservercallsizeoutbound","title":"ipc.server.call.size.outbound","text":"

    This is a distribution summary of the size in bytes of outbound messages sent from a server.

    Unit: bytes

    Dimensions:

    • ipc.protocol
    • ipc.vip
    • ipc.endpoint
    • ipc.result
    • ipc.status
    • ipc.status.detail
    • ipc.client.app
    • ipc.client.cluster
    • ipc.client.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcserverinflight","title":"ipc.server.inflight","text":"

    This is a distribution summary that shows the number of inbound IPC messages currently being processed in a server.

    Unit: inflight message count

    Dimensions:

    • ipc.protocol
    • ipc.endpoint
    • ipc.client.app
    • ipc.client.cluster
    • ipc.client.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#client-metrics","title":"Client Metrics","text":""},{"location":"spectator/specs/ipc/#ipcclientcall","title":"ipc.client.call","text":"

    This is a percentile timer that is recorded for each outbound message from a client.

    Unit: seconds

    Dimensions:

    • ipc.protocol
    • ipc.result
    • ipc.vip
    • ipc.endpoint
    • ipc.status
    • ipc.status.detail
    • ipc.failure.injected
    • ipc.attempt
    • ipc.attempt.final
    • ipc.server.app
    • ipc.server.cluster
    • ipc.server.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcclientcallsizeinbound","title":"ipc.client.call.size.inbound","text":"

    This is a distribution summary of the size in bytes of inbound messages received by a client.

    Unit: bytes

    Dimensions:

    • ipc.protocol
    • ipc.vip
    • ipc.endpoint
    • ipc.result
    • ipc.status
    • ipc.status.detail
    • ipc.server.app
    • ipc.server.cluster
    • ipc.server.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcclientcallsizeoutbound","title":"ipc.client.call.size.outbound","text":"

    This is a distribution summary of the size in bytes of outbound messages sent from a client.

    Unit: bytes

    Dimensions:

    • ipc.protocol
    • ipc.vip
    • ipc.endpoint
    • ipc.result
    • ipc.status
    • ipc.status.detail
    • ipc.server.app
    • ipc.server.cluster
    • ipc.server.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcclientinflight","title":"ipc.client.inflight","text":"

    This is a distribution summary that shows the number of currently outstanding outbound IPC messages from a client.

    Unit: inflight message count

    Dimensions:

    • ipc.protocol
    • ipc.vip
    • ipc.endpoint
    • ipc.server.app
    • ipc.server.cluster
    • ipc.server.asg
    • owner
    • id
    "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Atlas","text":"

    Atlas was developed by Netflix to manage dimensional time series data for near real-time operational insight. Atlas features in-memory data storage, allowing it to gather and report very large numbers of metrics, very quickly.

    Atlas captures operational intelligence. Whereas business intelligence is data gathered for analyzing trends over time, operational intelligence provides a picture of what is currently happening within a system.

    Atlas was built because the existing systems Netflix was using for operational intelligence were not able to cope with the increase in metrics we were seeing as we expanded our operations in the cloud. In 2011, we were monitoring 2 million metrics related to our streaming systems. By 2014, we were at 1.2 billion metrics and the numbers continue to rise. Atlas is designed to handle this large quantity of data and can scale with the hardware we use to analyze and store it.

    For details and background on the project please read through the overview page.

    Check out the getting started page for an introduction to using Atlas in the cloud environment. Once you've explored the example, check out the stack language references to see the various types of information you can access.

    "},{"location":"getting-started/","title":"Getting Started","text":"

    The instructions on this page are for quickly getting a sample backend server running on a local machine. For other common tasks see:

    • Querying Data:
      • Examples
      • Tutorial
    • Instrumenting Code
    "},{"location":"getting-started/#run-a-demo-instance","title":"Run a Demo Instance","text":"

    Prerequisites

    • These instructions assume a unix based machine with curl. Other systems may work, but have not been tried.
    • Java 8 or higher is required.

    To quickly run a version with some synthetic sample data:

    $ curl -LO https://github.com/Netflix/atlas/releases/download/v1.7.8/atlas-standalone-1.7.8.jar\n$ java -jar atlas-standalone-1.7.8.jar\n
    "},{"location":"getting-started/#explore-available-tags","title":"Explore Available Tags","text":"

    The tags API is used to explore available tags and the relationships between them.

    # show all tags\n$ curl -s 'http://localhost:7101/api/v1/tags'\n\n# show all values of the name, nf.app and type tags\n$ curl -s 'http://localhost:7101/api/v1/tags/name'\n$ curl -s 'http://localhost:7101/api/v1/tags/nf.app'\n$ curl -s 'http://localhost:7101/api/v1/tags/type'\n\n# show all name tags that also have the type tag\n$ curl -s 'http://localhost:7101/api/v1/tags/name?q=type,:has'\n\n# show all name tags that have an nf.app tag with a value of nccp\n$ curl -s 'http://localhost:7101/api/v1/tags/name?q=nf.app,nccp,:eq'\n
    "},{"location":"getting-started/#generate-graphs","title":"Generate Graphs","text":"

    These graph API URLs show off a couple of the capabilities of the Atlas backend. See the Examples page for more detailed use cases.

    # graph all metrics with a name tag value of ssCpuUser, using an :avg aggregation\n$ curl -Lo graph.png 'http://localhost:7101/api/v1/graph?q=name,ssCpuUser,:eq,:avg'\n\n# duplicate the ssCpuUser signal, check if it is greater than 22.8 and display the result as a vertical span with 30% alpha\n$ curl -Lo graph.png 'http://localhost:7101/api/v1/graph?q=name,ssCpuUser,:eq,:avg,:dup,22.8,:gt,:vspan,30,:alpha'\n
    "},{"location":"getting-started/#running-demo-with-memory-storage","title":"Running Demo with Memory Storage","text":"

    Run an instance with a configuration to use the memory storage:

    $ curl -Lo memory.conf https://raw.githubusercontent.com/Netflix/atlas/v1.7.x/conf/memory.conf\n$ java -jar atlas-standalone-1.7.8.jar memory.conf\n

    Now we can send some data to it. To quickly get started there is a sample script to send in some data:

    $ curl -Lo publish-test.sh https://raw.githubusercontent.com/Netflix/atlas/v1.7.x/scripts/publish-test.sh\n$ chmod 755 publish-test.sh\n$ ./publish-test.sh\n

    Then view the data in a web browser:

    $ open 'http://localhost:7101/api/v1/graph?q=name,randomValue,:eq,:sum,(,name,),:by'\n
    "},{"location":"overview/","title":"Overview","text":"

    Atlas is the system Netflix uses to manage dimensional time-series data for near real-time operational insight. It was primarily created to address issues with scale and query capability in the previous system.

    "},{"location":"overview/#history","title":"History","text":"

    In May of 2011, Netflix was using a home-grown solution called Epic to manage time-series data. Epic was a combination of perl CGI scripts, RRDTool logging, and MySQL. We were tracking around 2M distinct time series and the monitoring system was regularly failing to keep up with the volume of data. In addition there were a number of trends in the company which presaged a drastic increase in metric volume:

    • Rolling pushes to Red/Black deployments.
    • Leveraging auto-scaling for large clusters. Netflix has always used auto-scaling groups in AWS, but initially most were configured with fixed size and just used as a group and to replace instances.
    • Expansion internationally into Latin America and Europe. This led to an increase in the number of countries being tracked for key metrics and for Europe it was the first move into additional AWS regions. With additional regions we also wanted to have better isolation so a problem with monitoring in one region would not impact another, but at the same time have a mechanism to provide a global view if needed.

    Since that time the metric volume has continued to grow quickly. The graph below shows the increase in metrics measured over the last few years:

    The growth in raw volume required increased query capability to actually use the data.

    "},{"location":"overview/#goals","title":"Goals","text":"

    The main goals for Atlas were to build a system that provided:

    • A Common API
    • Scale
    • Dimensionality
    "},{"location":"overview/#common-api","title":"Common API","text":"

    Epic did a number of things really well that we didn't want to lose when transitioning. In particular:

    • Normalization and consolidation
    • Flexible legends that scale independently of the chart
    • Math, especially handling of NaN values representing no data
    • Holt-Winters used for alerting
    • Visualization options
    • Deep linking

    Many of these are capabilities that are provided by the RRDTool library Epic was using, but most alternatives we looked at fell short in these categories. In addition, we have uses for other 3rd party services like CloudWatch and it is desirable to have common query capability for that data.

    "},{"location":"overview/#scale","title":"Scale","text":"

    As indicated in the history section, metrics volume was growing and we needed a system that could keep up. For a long time our biggest concern was write volume, however, we also wanted to scale in terms of the amount of data we could read or aggregate as part of a graph request.

    "},{"location":"overview/#dimensionality","title":"Dimensionality","text":"

    This is a decision that was made because users were already doing it in ways that were hard to support. Epic only supported a simple name with some special case system dimensions of cluster and node. Many users were creating names like:

    com.netflix.eds.nccp.successful.requests.uiversion.nccprt-authorization.devtypid-101.clver-PHL_0AB.uiver-UI_169_mid.geo-US\n

    That breaks down to:

    Key Value name com.netflix.eds.nccp.successful.requests.uiversion nccprt authorization devtypid 101 clver PHL_0AB uiver UI_169_mid geo US

    Since it was all mangled into a name with different conventions by team, users would have to resort to complex regular expressions to slice and dice the data based on the dimensions.

    "},{"location":"overview/#query-layer","title":"Query Layer","text":"

    In order to get a common API, have flexibility for backend implementations, and provide merged views across backends we built a query layer that can be hierarchically composed. The diagram below shows the main Netflix setup:

    We have isolated regional deployments in each region we operate in as well as a global deployment that can combine the results from multiple regions. The query and aggregation operations can be performed on the fan out so most of the big summarization operations will distribute the computation across the tree and typically to an optimized storage layer at some point.

    Allowing the query and rendering layer to work on multiple backends also makes it easier for us to consider transitioning to other backends in the future such as OpenTSDB or InfluxDB. When switching to Atlas, one of the biggest hurdles was compatibility and transitioning to the new system.

    "},{"location":"overview/#stack-language","title":"Stack Language","text":"

    One of our key requirements was to be able to have deep links into a particular chart and to be able to reliably pass around or embed these images via email, wikis, html pages, etc. In addition, the user who receives the link should be able to tweak the result. Atlas uses a simple stack language that has a minimal punctuation and allows arbitrarily complex graph expressions to be encoded in a URL friendly way. This means that all images can be accessed using a GET request. The stack language is also simple to parse and interpret, allowing it to be easily consumed from a variety of tools. The core features include:

    • Embedding and linking using a GET request
    • URL friendly stack language
      • Few special symbols (comma, colon, parenthesis)
      • Easy to extend
    • Basic operations
      • Query: and, or, equal, regex, has key, not
      • Aggregation: sum, count, min, max, group by
      • Consolidation: aggregate across time
      • Math: add, subtract, multiply, etc
      • Boolean: and, or, lt, gt, etc
      • Graph settings: legends, area, transparency
    "},{"location":"overview/#graph-example","title":"Graph Example","text":"

    To illustrate, this is a sample graph image:

    This graph shows the number of requests per second and compares that with a prediction line generated using double exponential smoothing. If the number of requests per second falls below the prediction, it indicates an alert would trigger using the vertical spans. The url to generate this image follows (newlines added for readability):

    http://atlas/api/v1/graph\n  ?tz=UTC\n  &e=2012-01-01T08:00\n  &s=e-8h\n  &w=500\n  &h=150\n  &l=0\n  &q=nf.cluster,alerttest,:eq,\n     name,requestsPerSecond,:eq,:and,\n     :sum,\n     :dup,10,0.1,0.02,:des,\n     0.85,:mul,\n     :2over,:lt,\n     :rot,$name,:legend,\n     :rot,prediction,:legend,\n     :rot,:vspan,60,:alpha,alert+triggered,:legend\n

    Adding some comments to the stack expression to explain a bit what is going on:

    # Query to generate the input line\nnf.cluster,alerttest,:eq,\nname,requestsPerSecond,:eq,:and,\n:sum,\n\n# Create a copy on the stack\n:dup,\n\n# Apply a DES function to generate a prediction\n# using the copy on the top of the stack. For\n# a description of the parameters see the DES\n# reference page.\n10,0.1,0.02,:des,\n\n# Used to set a threshold. The prediction should\n# be roughly equal to the line, in this case the\n# threshold would be 85% of the prediction.\n0.85,:mul,\n\n# Before              After\n# 4.                  4. actual\n# 3.                  3. prediction\n# 2. actual           2. actual\n# 1. prediction       1. prediction\n:2over,\n\n# Create a boolean signal line that is 1\n# for datapoints where the actual value is\n# less than the prediction and 0 where it\n# is greater than or equal the prediction.\n# The 1 values are where the alert should\n# trigger.\n:lt,\n\n# Apply presentation details.\n:rot,$name,:legend,\n:rot,prediction,:legend,\n:rot,:vspan,60,:alpha,alert+triggered,:legend\n

    See the stack language page for more information.

    "},{"location":"overview/#memory-storage","title":"Memory Storage","text":"

    Storage for Atlas has been a bit of a sore point. We have tried many backends and ended up moving more and more to a model where pretty much all data is stored in memory either in or off the java heap.

    "},{"location":"overview/#speed","title":"Speed","text":"

    The primary goal for Atlas is to support queries over dimensional time series data so we can slice and dice to drill down into problems. This means we frequently have a need to perform large aggregations that involve many data points even though the final result set might be small.

    As an example consider a simple graph showing the number of requests per second hitting a service for the last 3 hours. Assuming minute resolution that is 180 datapoints for the final output. On a typical service we would get one time series per node showing the number of requests so if we have 100 nodes the intermediate result set is around 18k datapoints. For one service users went hog wild with dimensions breaking down requests by device (~1000s) and country (~50) leading to about 50k time series per node. If we still assume 100 nodes that is about 900M datapoints for the same 3h line.

    Though obviously we have to be mindful about the explosion of dimensions, we also want that where possible to be a decision based on cost and business value rather than a technical limitation.

    "},{"location":"overview/#resilience","title":"Resilience","text":"

    What all has to be working in order for the monitoring system to work? If it falls over what is involved in getting it back up? Our focus is primarily operational insight so the top priority is to be able to determine what is going on right now. This leads to the following rules of thumb:

    • Data becomes exponentially less important as it gets older
    • Restoring service is more important than preventing data loss
    • Try to degrade gracefully

    As a result the internal Atlas deployment breaks up the data into multiple windows based on the window of data they contain.

    With this setup we can show the last 6h of data as long as clients can successfully publish. The data is all in memory sharded across machines in the 6h clusters. Because the data and index are all in memory on the local node each instance is self-contained and doesn't need any external service to function. We typically run multiple mirrors of the 6h cluster so data is replicated and we can handle loss of an instance. In AWS we run each mirror in a different zone so that a zone failure will only impact a single mirror.

    The publish cluster needs to know all the instances in the mirror cluster and takes care of splitting the traffic up, so it goes to the correct shard. The set of mirror instances and shards are assigned based on slots from the Edda autoScalingGroups API. Since the set of instances for the mirrors change rarely, the publish instances can cache the Edda response and still successfully publish most data if Edda fails. If an instance is replaced and we can't update data we would have partial loss for a single shard if the same shard was missing in another mirror.

    Historical data can also fail in which case graphs would not be able to show data for some older windows. This doesn't have to be fully continuous, for example a common use case for us is to look at week-over-week (WoW) charts even though the span of the chart might only be a few hours. If the < 4d cluster fails but the < 16d cluster is functioning we could still serve that graph even though we couldn't show a continuous graph for the full week. A graph would still be shown but would be missing data in the middle.

    After data is written to the mirrors, they will flush to a persistence layer that is responsible for writing the data to the long term storage in S3. The data at full resolution is kept in S3 and we use hadoop (Elastic MapReduce) for processing the data to perform corrective merging of data from the mirrors, generate reports, and perform rollups into a form that can be loaded into the historical clusters.

    "},{"location":"overview/#cost","title":"Cost","text":"

    Keeping all data in memory is expensive, in particular with the large growth rate of data. The combination of dimensionality and time based partitioning used for resilience also gives us a way to help manage costs.

    The first way is in controlling the number of replicas. In most cases we are using replicas for redundancy not to provide additional query capacity. For historical data that can be reloaded from stable storage we typically run only one replica as the duration of partial downtime was not deemed to be worth the cost for an additional replica.

    The second way is as part of the hadoop processing we can compute rollups so that we have a much smaller data volume to load in historical clusters. At Netflix the typical policy is roughly:

    Cluster Policy < 6h Keeps all data received < 4d ago Keeps most data, we do early rollup by dropping the node dimension on some business metrics < 16d ago Rollup by dropping the node dimension on all metrics older Explicit whitelist, typically recommend BI systems for these use-cases

    Using these policies we get greatly reduced index sizes for the number of distinct time series despite a significant amount of churn. With auto-scaling and red/black deployment models the set of instances change frequently so typically the intersection of distinct time series from one day to the next is less than 50%. Rollups target the dimensions which lead to that churn giving us much smaller index sizes. Also, in many cases dimensions like node that lead to this increase become less relevant after the node goes away. Deep-dive or investigative use-cases can still access the data using hadoop if needed.

    Snapshot of index sizes for one region in our environment:

    < 6h < 4d < 16d"},{"location":"overview/#ecosystem","title":"Ecosystem","text":"

    Internally there is a lot of tooling and infrastructure built up around Atlas. We are planning to open source many of these tools as time permits. This project is the first step for that with the query layer and some of the in-heap memory storage. Some additional parts that should come in the future:

    • User interfaces
      • Main UI for browsing data and constructing queries.
      • Dashboards
      • Alerts
    • Platform
      • Inline aggregation of reported data before storage layer
      • Storage options using off-heap memory and lucene
      • Percentile backend
      • Publish and persistence applications
      • EMR processing for computing rollups and analysis
      • Poller for SNMP, healthchecks, etc
    • Client
      • Supports integrating servo with Atlas
      • Local rollups and alerting
    • Analytics
      • Metrics volume report
      • Canary analysis
      • Outlier and anomaly detection

    These projects were originally developed and run internally and thus only needed to be setup by our team and assume many internal infrastructure pieces to run. There is a goal to try and make this easier, but it will take some time.

    "},{"location":"api/tags/","title":"Tags","text":"

    This page is a reference for the tags API provided by Atlas.

    "},{"location":"api/tags/#uri","title":"URI","text":"

    /api/v1/tags?q=<expr>&[OPTIONS]

    "},{"location":"api/tags/#query-parameters","title":"Query Parameters","text":""},{"location":"api/tags/#callback-callback","title":"Callback (callback)","text":"

    If the format is json, the callback is used for providing JSONP output. This parameter is ignored for all other formats.

    "},{"location":"api/tags/#format-format","title":"Format (format)","text":"

    Specifies the output format to use. The default is json.

    Value Description json Outputs the graph data as a JSON object. txt Uses mime-type text/plain so it will render in the browser."},{"location":"api/tags/#limit-limit","title":"Limit (limit)","text":"

    Maximum number of results to return before paging the response. If the response is paged a x-nflx-atlas-next-offset will be set to indicate the next offset. Pass the value with an offset param to get the next part of the list. If the header is not present there is no more data.

    "},{"location":"api/tags/#offset-offset","title":"Offset (offset)","text":"

    If the response is paged this param is used to indicate where the next request should pick up from.

    "},{"location":"api/tags/#query-q","title":"Query (q)","text":"

    Query expression used to select a set of metrics and manipulate them for presentation in a graph. The query expression can use query and std commands described in the reference.

    "},{"location":"api/time-parameters/","title":"Time Parameters","text":"

    APIs that accept time ranges support three parameters:

    1. Start time (s)
    2. End time (e)
    3. Time zone (tz)
    "},{"location":"api/time-parameters/#time-zone","title":"Time Zone","text":"

    Time zone can be any valid time zone id string.

    "},{"location":"api/time-parameters/#time","title":"Time","text":""},{"location":"api/time-parameters/#absolute-times","title":"Absolute Times","text":"

    Absolute times can be specified by name or as a timestamp.

    "},{"location":"api/time-parameters/#named-times","title":"Named Times","text":"

    Named times are references that will get resolved to a timestamp when a query is executed. For example, with graphs it is common to set the end time to now.

    Name Description s User specified start time. Can only be used as part of the end parameter. e User specified end time. Can only be used as part of the start parameter. now Current time. epoch January 1, 1970 UTC."},{"location":"api/time-parameters/#timestamps","title":"Timestamps","text":"

    Explicit timestamps can use the following formats:

    Format Description %Y-%m-%d Date using the timezone for the query. The time will be 00:00. %Y-%m-%dT%H:%M Date time using the timezone for the query. The seconds will be 00. %Y-%m-%dT%H:%M:%S Date time using the timezone for the query. %s Seconds since January 1, 1970 UTC. %s (ms) Milliseconds since January 1, 1970 UTC.

    For times since the epoch both seconds and milliseconds are supported because both are in common use and it helps to avoid confusion when copy and pasting from another source. Values less than or equal to 2,147,483,648 (2^31) will be treated as a timestamp in seconds. Values above that will be treated as a timestamp in milliseconds. So times from the epoch to 1970-01-25T20:31:23 cannot be represented in the millisecond form. In practice, this limitation has not been a problem.

    The first three formats above can also be used with an explicit time zone.

    "},{"location":"api/time-parameters/#zone-offsets","title":"Zone Offsets","text":"

    An explicit time zone can be specified as Z to indicate UTC or by using an offset in hours and minutes. For example:

    2012-01-12T01:37Z\n2012-01-12T01:37-00:00\n2012-01-12T01:37-07:00\n2012-01-12T01:37-07:42\n

    A common format recommended for logs at Netflix is an ISO timestamp in UTC:

    2012-01-12T01:37:27Z\n

    These can be copy and pasted to quickly check a graph for a timestamp from a log file. For practical purposes in Atlas a -00:00 offset timezone can be thought of as UTC, but depending on the source may have some additional meaning.

    "},{"location":"api/time-parameters/#relative-times","title":"Relative Times","text":"

    Relative times consist of a named time used for an anchor and an offset duration.

    <named-time> '-' <duration>\n<named-time> '+' <duration>\n

    For example:

    Pattern Description now-1w One week ago. e-1w One week before the end time. s+6h Six hours after the start time. s+P2DT6H5M Two days, 6 hours, and 5 minutes after the start time."},{"location":"api/time-parameters/#durations","title":"Durations","text":""},{"location":"api/time-parameters/#duration-vs-period","title":"Duration vs Period","text":"

    This section is using the definition of duration and period from the java time libraries. In short:

    • Durations are a fixed number of seconds.
    • Periods represent a length of time in a given calendar. For example, the length of a day will vary if there is a daylight savings transition.

    The offset used for relative times in Atlas are durations because:

    • It is primarily focused on shorter time spans (~ 2 weeks) where drift is less of an issue. In this range the variation is most commonly seen for the daylight savings time transitions.
    • For time shifts day over day and week over week are the most common for operational purposes. During daylight savings time transitions a fixed duration seems to cause the least confusion, especially when the transition time is within the window being displayed. The primary use-case where periods were found to be more beneficial and less confusing is for week over week when looking at a small window that does not include the transition. In those cases if the signal reflects human behavior, such as playing movies, then the week over week pattern will typically line up better if using a period.
    "},{"location":"api/time-parameters/#simple-duration","title":"Simple Duration","text":"

    A simple offset uses a positive integer followed by one of these units:

    • s, second, or seconds
    • m, min, minute, or minutes
    • h, hour, or hours
    • d, day, or days
    • w, week, or weeks
    • month or months
    • y, year, or years

    All durations are a fixed number of seconds. A day is 24 hours, week is 7 days, month is 30 days, and a year is 365 days.

    "},{"location":"api/time-parameters/#iso-duration","title":"ISO Duration","text":"

    The duration can also be specified as an ISO duration string, but day (D) is the largest part that can be used within the duration. Others such as week (W), month (M), and year (Y) are not supported. Examples:

    Pattern Description P1D One day of exactly 24 hours. P1DT37M One day and 37 minutes. PT5H6M Five hours and six minutes.

    For more details see docs on parsing durations.

    "},{"location":"api/graph/anonymization/","title":"Anonymization","text":"

    Occasionally it is useful to show a graph, but the exact values need to be suppressed. This can be useful for communicating with external support or including in a presentation. To avoid showing the actual values disable tick labels using tick_labels=off and either disable the legend or disable the legend stats.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend_stats=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n  &tick_labels=off\n

    If you also want to suppress the time axis, then use the only_graph option:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &only_graph=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n

    "},{"location":"api/graph/axis-bounds/","title":"Axis Bounds","text":"

    The upper and lower bounds for an axis can be set to an explicit floating point value or:

    • auto-style: automatically determine the bounds based on the data and the style settings for that data. In particular, if the line style is area or stack, then the bounds will be adjusted to show the filled area. This is the default behavior.
    • auto-data: automatically determine the bounds based on the data. This will only take into account the values of the lines. In the case of stack it will account for the position of the stacked lines, but not the filled area.

    When selecting bounds it is important to think about how it can impact the perception of what is shown. Automatic bounds can be useful for zooming in on the data, but can also lead to mis-perceptions for someone quickly scanning a dashboard. Consider these two graphs showing percent CPU usage on an instance:

    Automatic Bounds Explicit Bounds

    The automatic bounds allows us to see much more detail, but could lead a casual observer to think there were frequent large spikes in CPU usage rather than just noise on a machine with very little load.

    See Tick Labels for information on Y axis label formatting and suffix information.

    "},{"location":"api/graph/axis-bounds/#default-lower","title":"Default Lower","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by\n
    "},{"location":"api/graph/axis-bounds/#default-lower-stack","title":"Default Lower Stack","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by,\n    :stack\n
    "},{"location":"api/graph/axis-bounds/#default-upper","title":"Default Upper","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by,\n    :neg\n
    "},{"location":"api/graph/axis-bounds/#default-upper-stack","title":"Default Upper Stack","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by,\n    :neg,\n    :stack\n
    "},{"location":"api/graph/axis-bounds/#explicit-bounds","title":"Explicit Bounds","text":"

    Note the &l=0 and &u=60e3 parameters.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &l=0\n  &u=60e3\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by\n

    Note

    It is possible to define the boundaries beyond the range of the data source so that a graph appears empty.

    "},{"location":"api/graph/axis-bounds/#auto-lower","title":"Auto Lower","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &l=auto-data\n  &\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by,\n    :stack\n
    "},{"location":"api/graph/axis-bounds/#auto-upper","title":"Auto Upper","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &s=e-1d\n  &tz=UTC\n  &u=auto-data\n  &\n  &q=\n    name,sps,:eq,\n    nf.cluster,(,nccp-xbox,nccp-silverlight,),:in,\n    :and,\n    :sum,\n    (,nf.cluster,),:by,\n    :stack\n
    "},{"location":"api/graph/axis-scale/","title":"Axis Scale","text":"

    Scales determine how the data value for a line will get mapped to the Y-Axis. There are currently five scales that can be used for an axis:

    • Linear
    • Logarithmic
    • Log Linear
    • Power of 2
    • Square Root

    See Tick Labels for information on Y axis label formatting and suffix information.

    "},{"location":"api/graph/axis-scale/#linear","title":"Linear","text":"

    A linear scale uniformly maps the input values (domain) to the Y-axis location (range). If v is a datapoint in a time series, then y=m*v+b where m and b are automatically chosen based on the domain and range.

    This is the default scale for an axis and will get used if no explicit scale is set. Since 1.6, it can also be used explicitly:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    minuteOfHour,:time,\n    1e3,:add,\n    minuteOfHour,:time\n  &scale=linear\n

    "},{"location":"api/graph/axis-scale/#logarithmic","title":"Logarithmic","text":"

    A logarithmic scale emphasizes smaller values when mapping the input values (domain) to the Y-axis location (range). This is often used if two lines with significantly different magnitudes are on the same axis. If v is a datapoint in a time series, then y=m*log(v)+b where m and b are automatically chosen based on the domain and range. In many cases, using a separate Y-axis can be a better option that doesn't distort the line as much.

    To use this mode, add scale=log (prior to 1.6 use o=1).

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    minuteOfHour,:time,\n    1e3,:add,\n    minuteOfHour,:time\n  &scale=log\n

    "},{"location":"api/graph/axis-scale/#log-linear","title":"Log Linear","text":"

    Since 1.8.

    A logarithmic scale for powers of 10 with linear behavior between ticks. This is useful for heatmap views of percentile distributions. Note that unit suffixes change with this scale.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    minuteOfHour,:time,\n    1e3,:add,\n    minuteOfHour,:time\n  &scale=log-linear\n

    "},{"location":"api/graph/axis-scale/#power-of-2","title":"Power of 2","text":"

    Since 1.6.

    A power scale that emphasizes larger values when mapping the input values (domain) to the Y-axis location (range). If v is a datapoint in a time series, then y=m*v^2+b where m and b are automatically chosen based on the domain and range. To emphasize smaller values see the square root scale.

    To use this mode, add scale=pow2.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    minuteOfHour,:time,\n    1e3,:add,\n    minuteOfHour,:time\n  &scale=pow2\n

    "},{"location":"api/graph/axis-scale/#square-root","title":"Square Root","text":"

    Since 1.6.

    A power scale that emphasizes smaller values when mapping the input values (domain) to the Y-axis location (range). If v is a datapoint in a time series, then y=m*v^0.5+b where m and b are automatically chosen based on the domain and range. To emphasize larger values see the power of 2 scale.

    To use this mode, add scale=sqrt.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    minuteOfHour,:time,\n    1e3,:add,\n    minuteOfHour,:time\n  &scale=sqrt\n

    "},{"location":"api/graph/basics/","title":"Basics","text":"

    This section gives some examples to get started quickly creating simple graphs.

    • Single Line
    • Adding a Title
    • Multiple Lines
    • Group By
    • Simple Math
    • Binary Operations
    "},{"location":"api/graph/basics/#single-line","title":"Single Line","text":"

    The only required parameter is q which specifies the query expression for a line. The other two common parameters are for setting the start time, s, and the end time, e, for the data being shown. Usually the start time will be set relative to the end time, such as e-3h, which indicates 3 hours before the end time. See time parameters for more details on time ranges.

    Putting it all together:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq\n  &s=e-2d\n

    The resulting PNG plot displays time along the X axis, automatically scaled to the proper time range. The Y axis labels are scaled using metric prefixes to show the measured value. A legend is displayed under the plot with the name(s) of the expression results and a set of statistics computed on the plotted data for the time window. The small text at the very bottom reflect query parameters and step size along with some processing statistics.

    "},{"location":"api/graph/basics/#adding-a-title","title":"Adding a Title","text":"

    The graph title can be set using the title parameter. Similarly, a Y-axis label can be set using the ylabel parameter.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq\n  &s=e-2d\n  &title=Starts+Per+Second\n  &ylabel=sps\n

    "},{"location":"api/graph/basics/#multiple-lines","title":"Multiple Lines","text":"

    Multiple expressions can be placed on a chart by concatenating the expressions, e.g., showing a query expression along with a constant value:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    500e3\n  &s=e-2d\n

    "},{"location":"api/graph/basics/#group-by","title":"Group By","text":"

    Multiple lines can also be a result of a single expression via group by.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-2d\n

    "},{"location":"api/graph/basics/#simple-math","title":"Simple Math","text":"

    A number of operators are provided to manipulate a line. See the math section of the stack language tutorial for a complete list. Example that negates the value of a line:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    :neg\n  &s=e-2d\n

    Example that negates and then applies absolute value to get the original value back (since all values were positive in the input):

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    :neg,\n    :abs\n  &s=e-2d\n

    "},{"location":"api/graph/basics/#binary-operations","title":"Binary Operations","text":"

    Lines can be combined using binary math operators such as add or multiply. Example using divide:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    1000,:div\n  &s=e-2d\n

    If used with a group by, then either:

    • Both sides have the same group by clause. In this case an inner join will be performed and the binary operation will be applied to the corresponding entries from both sides.
    • One side is not a grouped expression, and the binary operation will be applied for each instance in the grouped result set.
    "},{"location":"api/graph/basics/#both-sides-grouped","title":"Both Sides Grouped","text":"

    Dividing by self with both sides grouped:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :dup,\n    :div\n  &s=e-2d\n

    "},{"location":"api/graph/basics/#one-side-grouped","title":"One Side Grouped","text":"

    Dividing a grouped expression by a constant:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    1000,:div\n  &s=e-2d\n

    Equivalent to the previous expression, but the right-hand side is grouped and it uses multiply instead of divide:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    0.001,name,sps,:eq,\n    (,nf.cluster,),:by,\n    :mul\n  &s=e-2d\n

    "},{"location":"api/graph/color-palettes/","title":"Color Palettes","text":"

    The following color palettes are supported:

    • armytage
    • epic
    • blues
    • greens
    • oranges
    • purples
    • reds
    • custom

    There is also a hashed selection mode that can be used so that a line with a given label will always get the same color.

    "},{"location":"api/graph/color-palettes/#armytage","title":"Armytage","text":"

    This is the default color palette, it comes from the paper A Colour Alphabet and the Limits of Colour Coding by Paul Green-Armytage. Two colors, Xanthin and Yellow, are excluded because users found them hard to distinguish from a white background when used for a single pixel line. So overall there are 24 distinct colors with this palette.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=armytage\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#epic","title":"Epic","text":"

    This is a legacy palette that alternates between shades of red, green, and blue. It is supported for backwards compatibility, but not recommended.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=epic\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#blues","title":"Blues","text":"

    Shades of blue.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=blues\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#greens","title":"Greens","text":"

    Shades of green.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=greens\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#oranges","title":"Oranges","text":"

    Shades of orange.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=oranges\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#purples","title":"Purples","text":"

    Shades of purple.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=purples\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#reds","title":"Reds","text":"

    Shades of red.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=reds\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#custom","title":"Custom","text":"

    A custom color palette can be provided for a graph by using a list of comma separated hex color values following the ASL list format (,HEX,HEX,HEX,). This is mainly used to customize the colors for the result of a group by where you cannot set the color for each line using the list.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=(,1a9850,91cf60,d9ef8b,fee08b,fc8d59,d73027,)\n  &stack=1\n  &tz=UTC\n  &q=\n    1,1,1,1,1,1,1\n

    "},{"location":"api/graph/color-palettes/#hashed-selection","title":"Hashed Selection","text":"

    Any of the palettes above can be prefixed with hash: to select the color using a hashing function on the label rather than picking the next color from the list. The primary advantage is that the selected color will always be the same for a given label using a particular palette. However, some nice properties of the default mode are lost:

    • Colors can be duplicated even with a small number of lines. Hash collisions will result in the same color.
    • The palettes are ordered to try and make the stacked appearance and legends easier to read. For the armytage palette it is ordered so adjacent colors are easy to distinguish. For the palettes that are shades of a color they are ordered from dark to light shades to create a gradient effect. Hashing causes an arbitrary ordering of the colors from the palette.

    The table below illustrates the difference by adding some additional lines to a chart for the second row:

    armytage hash:armytage

    Example:

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &palette=hash:armytage\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :stack\n  &tz=UTC\n

    "},{"location":"api/graph/examples/","title":"Examples","text":"

    Browse the sidebar to get a good overview of graph options. It is recommended to at least go through the basics section. There is also a quick visual index below:

    Line Area Stack Stacked Percent

    VSpan Transparency Line Width Palettes

    Bounds Scales Multi Y Time Zones

    "},{"location":"api/graph/graph/","title":"Introduction","text":"

    The Graph API is the primary means to retrieve data from an Atlas store.

    The default response is a PNG image plotting data matching the Atlas Stack Language expression along with optional parameters to control time ranges, size, style, labels, etc. For a quick overview by example see the examples page.

    If graphs look familiar, that's because the design and language were inspired by RRDtool. RRD style graphs offer concise and highly customizable views of time series data. While a number of observability tools offer dynamic charts, a major benefit of these PNG graphs is the ability to snapshot data in time, particularly when that data may expire from a high throughput data store; PNGs are forever. Additionally, the majority of email and on-call systems support images out of the box without having to worry about porting a dynamic graphing library to various browsers and clients.

    The API only supports HTTP query strings at this time. This makes it easy to construct queries with tooling and share the URIs with other users. No JSON request payloads needed.

    Additional Output formats, including JSON, can be found in Outputs.

    "},{"location":"api/graph/graph/#uri","title":"URI","text":"

    /api/v1/graph?q=<expr>[&OPTIONS]

    "},{"location":"api/graph/graph/#http-method","title":"HTTP Method","text":"

    GET - Only the GET method is allowed at this time.

    "},{"location":"api/graph/graph/#query-parameters","title":"Query Parameters","text":""},{"location":"api/graph/graph/#data","title":"Data","text":"

    The only required query param is q which is the query expression used by the user to select and manipulate data. The simplest API query you can make is /api/v1/graph?q=42. This will produce a graph from Atlas with a straight line having a value of 42 for 3 hours* with a legend including statistics for the query period.

    All query params related to fetching data:

    Name Description Default Type q Query expression must be specified by user expr step Step size for data auto duration

    Warning

    In most cases users should not set step directly. The step parameter is deprecated.

    "},{"location":"api/graph/graph/#time","title":"Time","text":"

    There are three parameters to control the time range used for a graph:

    Name Description Default Type s Start time e-3h* Time e End time now* Time tz Time zone US/Pacific* Time zone ID

    For more information on the behavior see the time parameters page.

    "},{"location":"api/graph/graph/#image-flags","title":"Image Flags","text":"Name Description Default Type title Set the graph title no title String no_legend Suppresses the legend 0 boolean no_legend_stats Suppresses summary stats for the legend 0 boolean axis_per_line Put each line on a separate Y-axis 0 boolean only_graph Only show the graph canvas 0 boolean vision Simulate different vision types normal vision type"},{"location":"api/graph/graph/#image-size","title":"Image Size","text":"

    There are four parameters to control the image size and layout used for a graph:

    Name Description Default Type layout Mode for controlling exact or relative sizing canvas layout mode w Width of the canvas or image 700* int h Height of the canvas or image 300* int zoom Transform the size by a zoom factor 1.0 float

    For more information on the behavior see the graph layout page.

    "},{"location":"api/graph/graph/#y-axis","title":"Y-Axis","text":"Name Description Default Type stack Set the default line style to stack 0 boolean l Lower bound for the axis auto-style axis bound u Upper bound for the axis auto-style axis bound ylabel Label for the axis no label String palette Color palette to use armytage palette o Use a logarithmic scale (deprecated in 1.6) 0 boolean scale Set the axis scale to use (since 1.6) linear scale tick_labels Set the mode to use for tick labels decimal tick label mode sort Set the mode to use for sorting the legend expr order sort mode order Set the order ascending or descending for the sort asc order"},{"location":"api/graph/graph/#output-format","title":"Output Format","text":"Name Description Default Type format Output format to use png output format callback Method name to use for JSONP callback none String"},{"location":"api/graph/graph/#defaults","title":"Defaults","text":"

    If marked with an * the default shown can be changed by the administrator for the Atlas server. As a result the default in the table may not match the default you see. The defaults listed do match those used for the primary Atlas backends in use at Netflix.

    For users running their own server, the config settings and corresponding query params are:

    Key Query Param atlas.webapi.graph.start-time s atlas.webapi.graph.end-time e atlas.webapi.graph.timezone tz atlas.webapi.graph.width w atlas.webapi.graph.height h atlas.webapi.graph.palette palette"},{"location":"api/graph/graph/#boolean-flags","title":"Boolean Flags","text":"

    Flags with a true or false value are specified using 1 for true and 0 for false.

    "},{"location":"api/graph/heatmap/","title":"Heatmap","text":"

    Atlas primarily supports visualizing data in line charts. As of 1.8, Atlas can also visualize via heatmaps using the :heatmap line style. The graph area is broken up into a series of cells and a count for each cell is incremented when a measurement falls within the cell's boundaries. Colors or shades of colors then fill in cells based on the final count.

    "},{"location":"api/graph/heatmap/#percentiles","title":"Percentiles","text":"

    Heatmaps are particularly useful on top of percentile metrics to analyze the entire measurement range.

    Note Using the log linear scale will help to highlight clustered regions of measurements via &scale=log-linear. The example also uses data not available in the demo Atlas instance.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,requestLatency,:eq,\n    :percentile-heatmap,\n    \n  &scale=log-linear\n

    "},{"location":"api/graph/heatmap/#bounds","title":"Bounds","text":"

    The &heatmap_l= and &heatmap_u= parameters can be used to narrow the range of cells displayed in a heatmap. Heatmap bounds act on the count of measurements in a cell and the palette colors or shades chosen. Depending on the bound limits, some cells may appear empty.

    No Heatmap BoundsWith Bounds (&heatmap_l=1.2&heatmap_u=1.3)
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"api/graph/heatmap/#palette","title":"Palette","text":"

    The palette used for filling heatmap cells can be changed via the &heatmap_palette= parameter. By default, a color is chosen from the global palette (based on whether the heatmap is the first or a later expression). A gradient is then applied to that color with a lighter gradient representing smaller cell counts and darker representing larger counts.

    Default PaletteReds Palette (&heatmap_palette=reds)
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"api/graph/heatmap/#custom-palette","title":"Custom Palette","text":"

    A custom palette may be provided by listing the hex colors to use in descending order, meaning the color to use for the highest cell counts must appear first.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :heatmap\n  &heatmap_palette=(,8cd1b9,46adbc,2a91b8,1978b3,335ca9,413e95,361566,)\n

    For further information, see Custom Color Palettes.

    "},{"location":"api/graph/heatmap/#order-of-expressions","title":"Order of Expressions","text":"

    When overlaying expressions with a heatmap and using the default palette, the order of expressions determines the color gradient used for cells. For example, if the heatmap expression is second in the query, the second palette color will be used as the gradient:

    Heatmap FirstHeatmap Second
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap,\nname,sps,:eq,\n\n
    name,sps,:eq,\nname,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"api/graph/heatmap/#label","title":"Label","text":"

    The label for the heatmap can be changed via the &heatmap_label= parameter. By default, the label is simply heatmap.

    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"api/graph/heatmap/#scale","title":"Scale","text":"

    Similar to axis scales, the scale of the heatmap cell colors (or gradients) can be adjusted using the &heatmap_scale= parameter. By default, the scale is linear though any of the valid scales may be used.

    LinearLog Linear
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"api/graph/layout/","title":"Layout","text":"

    The diagram below shows the parts of an Atlas graph and will be used when describing the behavior for various options.

    The layout for graph images is trying to accomplish two main goals:

    "},{"location":"api/graph/layout/#usable-canvas-size","title":"Usable Canvas Size","text":"

    Keep the canvas usable regardless of the number of lines, axes, etc that are competing for space. For example, the canvas area should not become too small due to the number of lines on the chart.

    Good Layout Poor Layout"},{"location":"api/graph/layout/#canvas-alignment","title":"Canvas Alignment","text":"

    Make it easy to align the canvas portion of several graphs on an html page. This is important because it makes it easier to find visual correlations between multiple graphs on a dashboard.

    In particular if arranged in a grid with the image in the top left of each cell, then the canvas should line up vertically for columns:

    And horizontally for rows:

    In the graph layout diagram at the top, this is why variable components such as multi axes, legend entries, and warnings are positioned on either the right side or the bottom of the canvas.

    "},{"location":"api/graph/layout/#modes","title":"Modes","text":"

    There are four supported layout modes that can be used with the layout query parameter:

    • canvas: the width or height are for the canvas component within the chart. The actual image size will be calculated based on the number of entries in the legend, number of axes, etc. This is the default behavior.
    • image: the width or height are for the final image not including the zoom parameter. To try and adhere to layout goals when using this mode everything below the X-axes will automatically be suppressed. Vertical alignment will still hold as long as all graphs use the same number of Y-axes. Horizontal alignment will still hold as long as all graphs use the same number of X-axes.
    • iw: use exact image sizing for the width and canvas sizing for the height.
    • ih: use exact image sizing for the height and canvas sizing for the width.
    "},{"location":"api/graph/layout/#examples","title":"Examples","text":""},{"location":"api/graph/layout/#canvas","title":"Canvas","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &h=175\n  &layout=canvas\n  &q=\n    name,sps,:eq,\n    :sum,\n    (,nf.cluster,),:by\n  &s=e-1d\n  &tz=UTC\n  &w=400\n
    "},{"location":"api/graph/layout/#image","title":"Image","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &h=175\n  &layout=image\n  &q=\n    name,sps,:eq,\n    :sum,\n    (,nf.cluster,),:by\n  &s=e-1d\n  &tz=UTC\n  &w=400\n
    "},{"location":"api/graph/layout/#image-width","title":"Image Width","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &h=175\n  &layout=iw\n  &q=\n    name,sps,:eq,\n    :sum,\n    (,nf.cluster,),:by\n  &s=e-1d\n  &tz=UTC\n  &w=400\n
    "},{"location":"api/graph/layout/#image-height","title":"Image Height","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &h=175\n  &layout=ih\n  &q=\n    name,sps,:eq,\n    :sum,\n    (,nf.cluster,),:by\n  &s=e-1d\n  &tz=UTC\n  &w=400\n
    "},{"location":"api/graph/legends/","title":"Legends","text":"

    Options for adjusting legend:

    • Automatic
    • Explicit
    • Variables
    • Disable
    • Disable Stats
    • Sorting
    "},{"location":"api/graph/legends/#automatic","title":"Automatic","text":"

    If no explicit legend is specified, then the system will generate an automatic legend that summarizes the expression. There is no particular guarantee about what it will contain and in some cases it is difficult to generate a usable legend automatically. Example:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    hourOfDay,:time,\n    100,:mul,\n    minuteOfHour,:time,\n    :add\n  &s=e-1w\n

    "},{"location":"api/graph/legends/#explicit","title":"Explicit","text":"

    The legend for a line can be explicitly set using the :legend operator.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    hourOfDay,:time,\n    100,:mul,\n    minuteOfHour,:time,\n    :add,\n    time+value,:legend\n  &s=e-1w\n

    "},{"location":"api/graph/legends/#variables","title":"Variables","text":"

    Tag keys can be used as variables to plug values into the legend. This is useful when working with group by operations to customize the legend for each output. The variable can be expressed as a $ followed by the tag key if it is the only part of the legend:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    $nf.cluster,:legend\n  &s=e-1w\n

    Or as $( the tag key and a closing ) if combined with other text:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    cluster+$(nf.cluster)+sps,:legend\n  &s=e-1w\n

    "},{"location":"api/graph/legends/#disable","title":"Disable","text":"

    Legends can be disabled using the no_legend graph parameter.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n

    "},{"location":"api/graph/legends/#disable-stats","title":"Disable Stats","text":"

    You can also save vertical space and keep the legend by disabling the summary stats shown in the legend using the no_legend_stats graph parameter.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend_stats=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n

    "},{"location":"api/graph/legends/#sorting","title":"Sorting","text":"

    By default the legend for an axis will be ordered based on the order of the expressions on the stack. If an expression results in multiple lines, i.e. a group by, then they will be sorted by the label.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    120e3,threshold,:legend,\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-12h\n

    "},{"location":"api/graph/legends/#overall","title":"Overall","text":"

    To sort all lines on a given axis using a different mode use the sort URL parameter.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    120e3,threshold,:legend,\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-12h\n  &sort=max\n

    To change it to descending order use the order parameter, e.g.:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    120e3,threshold,:legend,\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-12h\n  &sort=max\n  &order=desc\n

    "},{"location":"api/graph/legends/#group-by-expression","title":"Group By Expression","text":"

    If more control is needed, then sorting can be applied to a particular group by expression. This can be useful for things like alerting visualizations where some common lines like the threshold and trigger indicator should be pinned to the top, but it is desirable to sort other results based on a stat like max. For example:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    120e3,threshold,:legend,\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :dup,\n    :max,\n    120e3,:gt,\n    30,:alpha,\n    :vspan,\n    trigger,:legend,\n    :swap,\n    max,:sort,\n    desc,:order,\n    $nf.cluster,:legend\n  &s=e-12h\n

    "},{"location":"api/graph/legends/#sorting-modes","title":"Sorting Modes","text":"
    • legend: alphabetically based on the label used in the legend. This is the default.
    • min: using the minimum value shown in the legend stats.
    • max: using the maximum value shown in the legend stats.
    • avg: using the average value shown in the legend stats.
    • count: using the count value shown in the legend stats.
    • total: using the total value shown in the legend stats.
    • last: using the last value shown in the legend stats.
    "},{"location":"api/graph/legends/#sorting-order","title":"Sorting Order","text":"
    • asc: use ascending order. This is the default.
    • desc: use descending order.
    "},{"location":"api/graph/line-attributes/","title":"Line Attributes","text":"

    In addition to the line style and legend the following attributes can be adjusted:

    • Color
    • Transparency
    • Line Width
    "},{"location":"api/graph/line-attributes/#color","title":"Color","text":"

    By default the color will come from the palette that is in use. However the color for a line can also be set explicitly using the :color operator:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    f00,:color\n  &s=e-1w\n

    Note, that for a group by all results will get the same attributes, so in this case all would end up being the same color:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    f00,:color\n  &s=e-1w\n

    "},{"location":"api/graph/line-attributes/#transparency","title":"Transparency","text":"

    The transparency of a line can be set using the :alpha operator or by explicitly setting the alpha channel as part of the color.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    :dup,\n    6h,:offset,\n    :area,\n    40,:alpha\n  &s=e-2d\n

    Setting the alpha explicitly as part of the color:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    :dup,\n    6h,:offset,\n    :area,\n    40ff0000,:color\n  &s=e-2d\n

    "},{"location":"api/graph/line-attributes/#line-width","title":"Line Width","text":"

    Adjust the stroke width used for a line:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    :dup,\n    6h,:offset,\n    3,:lw\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/","title":"Line Styles","text":"

    There are four line styles available:

    • Line
    • Area
    • Stack
    • Vertical Span
    • Heatmap

    Multiple styles can be used in the same chart or combined with other operations.

    • Stacked Percentage
    • Combinations
    • Layering
    "},{"location":"api/graph/line-styles/#line","title":"Line","text":"

    The default style is line.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :line\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#area","title":"Area","text":"

    Area will fill the space between the line and 0 on the Y-axis. The alpha setting is just used to help visualize the overlap.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :area,\n    40,:alpha\n  &s=e-1w\n

    Similarly for negative values:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :neg,\n    :area,\n    40,:alpha\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#stack","title":"Stack","text":"

    Stack is similar to area, but will stack the filled areas on top of each other.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :stack\n  &s=e-1w\n

    Similarly for negative values:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :neg,\n    :stack\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#stacked-percentage","title":"Stacked Percentage","text":"

    The stack style can be combined with the :pct operator to get a stacked percentage chart for a group by:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :pct,\n    :stack\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#heatmap","title":"Heatmap","text":"

    Since 1.8.

    Plotting many time series with a heat map can be useful for identifying concentrations of measurements where individual lines may produce too much noise.

    See Heatmap for more details.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :heatmap\n

    "},{"location":"api/graph/line-styles/#vertical-span","title":"Vertical Span","text":"

    The vertical span style converts non-zero to spans. This is often used to highlight some portion of another line.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    50e3,:gt,\n    :vspan\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#combinations","title":"Combinations","text":"

    Line styles can be combined, e.g., to highlight the portion of a line that is above a threshold:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &no_legend=1\n  &q=\n    name,sps,:eq,\n    :dup,\n    50e3,:gt,\n    :vspan,\n    40,:alpha,\n    50e3\n  &s=e-1w\n

    "},{"location":"api/graph/line-styles/#layering","title":"Layering","text":"

    The z-order is based on the order of the expression on the stack.

    /api/v1/graph?\n  e=2015-03-10T13:13\n  &no_legend=1\n  &q=\n    t,name,sps,:eq,\n    :sum,\n    :set,\n    t,:get,\n    :stack,\n    t,:get,\n    1.1,:mul,\n    6h,:offset,\n    t,:get,\n    4,:div,\n    :stack\n  &s=e-2d\n

    "},{"location":"api/graph/multi-y/","title":"Multi Y Axis","text":"

    Examples for using multiple Y-axes:

    • Explicit
    • Explicit Bounds
    • Axis Per Line
    • Palettes
    "},{"location":"api/graph/multi-y/#explicit","title":"Explicit","text":"

    By default all lines will go on axis 0, the one on the left side. A different axis can be specified using the :axis operation.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    42,1,:axis\n

    "},{"location":"api/graph/multi-y/#explicit-bounds","title":"Explicit Bounds","text":"

    By default all axes will pick up axis settings with no qualifier:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &l=0\n  &q=\n    name,sps,:eq,\n    42,1,:axis\n

    Bounds and other axis settings can be set per axis, e.g., this graph moves the constant line for 42 to a separate axis and sets the lower bound to 0 via the &l.1=0 parameter. This would work as well for &u.1=100e3. Append the index after the l. or u. :

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &l.1=0\n  &q=\n    name,sps,:eq,\n    42,1,:axis\n

    "},{"location":"api/graph/multi-y/#axis-per-line","title":"Axis Per Line","text":"

    There is a convenience operation to plot each line on a separate axis.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &axis_per_line=1\n  &q=\n    name,sps,:eq,\n    nf.cluster,nccp-p,:re,\n    :and,\n    (,nf.cluster,),:by\n

    If there are too many lines and it would be over the max Y-axis limit, then a warning will be shown:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &axis_per_line=1\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n

    "},{"location":"api/graph/multi-y/#palettes","title":"Palettes","text":"

    The color of the first line on an axis will get used as the color of the axis. The intention is to make it easy to understand which axis a line is associated with, since in an image dynamic clues like hover cannot be used. Generally it is recommended to only have one line per axis when using multi-Y. Example:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &l=01\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    minuteOfHour,:time,\n    1,:axis\n

    Though we recommend not using more than one line per axis with multi-Y, a color palette can be specified for a specific axis. This can be used to select shades of a color for an axis so it is still easy to visually associate which axis a line belongs to:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &l=01\n  &palette.0=reds\n  &palette.1=blues\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    :stack,\n    minuteOfHour,:time,\n    1,:axis\n

    "},{"location":"api/graph/outputs/","title":"Output Formats","text":"

    The following output formats are supported by default for graphing:

    • png
    • csv
    • txt
    • json
    • std.json
    • stats.json
    "},{"location":"api/graph/outputs/#png","title":"png","text":"

    This is the default and creates a PNG image for the graph. The mime type is image/png.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=png\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-3m\n  &tz=UTC\n

    "},{"location":"api/graph/outputs/#csv","title":"csv","text":"

    Comma separated value output. The mime type is text/csv.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=csv\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-5m\n  &tz=UTC\n
    \"timestamp\",\"hourOfDay\",\"minuteOfHour\",\"NaN\"\n2012-01-01T08:56:00Z,8.000000,56.000000,NaN\n2012-01-01T08:57:00Z,8.000000,57.000000,NaN\n2012-01-01T08:58:00Z,8.000000,58.000000,NaN\n2012-01-01T08:59:00Z,8.000000,59.000000,NaN\n2012-01-01T09:00:00Z,9.000000,0.000000,NaN\n
    "},{"location":"api/graph/outputs/#txt","title":"txt","text":"

    Same as csv except that the separator is a tab character instead of a comma. The mime type will be text/plain so it is more likely to render directly in the browser rather than trigger a download.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=txt\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-5m\n  &tz=UTC\n
    \"timestamp\" \"hourOfDay\" \"minuteOfHour\"  \"NaN\"\n2012-01-01T08:56:00Z    8.000000    56.000000   NaN\n2012-01-01T08:57:00Z    8.000000    57.000000   NaN\n2012-01-01T08:58:00Z    8.000000    58.000000   NaN\n2012-01-01T08:59:00Z    8.000000    59.000000   NaN\n2012-01-01T09:00:00Z    9.000000    0.000000    NaN\n
    "},{"location":"api/graph/outputs/#json","title":"json","text":"

    JSON output representing the data. Note that it is not standard json as numeric values like NaN will not get quoted.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=json\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-5m\n  &tz=UTC\n
    {\n  \"start\" : 1325408160000,\n  \"step\" : 60000,\n  \"legend\" : [ \"hourOfDay\", \"minuteOfHour\", \"NaN\" ],\n  \"metrics\" : [ {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"hourOfDay\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"minuteOfHour\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"NaN\"\n  } ],\n  \"values\" : [ [ 8.0, 56.0, NaN ], [ 8.0, 57.0, NaN ], [ 8.0, 58.0, NaN ], [ 8.0, 59.0, NaN ], [ 9.0, 0.0, NaN ] ],\n  \"notices\" : [ ]\n}\n
    "},{"location":"api/graph/outputs/#stdjson","title":"std.json","text":"

    Same as json except that numeric values which are not recognized by standard json will be quoted. The mime type is application/json.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=std.json\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-5m\n  &tz=UTC\n
    {\n  \"start\" : 1325408160000,\n  \"step\" : 60000,\n  \"legend\" : [ \"hourOfDay\", \"minuteOfHour\", \"NaN\" ],\n  \"metrics\" : [ {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"hourOfDay\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"minuteOfHour\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"NaN\"\n  } ],\n  \"values\" : [ [ 8.0, 56.0, \"NaN\" ], [ 8.0, 57.0, \"NaN\" ], [ 8.0, 58.0, \"NaN\" ], [ 8.0, 59.0, \"NaN\" ], [ 9.0, 0.0, \"NaN\" ] ],\n  \"notices\" : [ ]\n}\n
    "},{"location":"api/graph/outputs/#statsjson","title":"stats.json","text":"

    Provides the summary stats for each line, but not all of the data points. The mime type is application/json.

    /api/v1/graph?\n  e=2012-01-01T09:00\n  &format=stats.json\n  &q=\n    hourOfDay,:time,\n    minuteOfHour,:time,\n    NaN\n  &s=e-5m\n  &tz=UTC\n
    {\n  \"start\" : 1325408160000,\n  \"end\" : 1325408460000,\n  \"step\" : 60000,\n  \"legend\" : [ \"hourOfDay\", \"minuteOfHour\", \"NaN\" ],\n  \"metrics\" : [ {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"hourOfDay\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"minuteOfHour\"\n  }, {\n    \"atlas.offset\" : \"0w\",\n    \"name\" : \"NaN\"\n  } ],\n  \"stats\" : [ {\n    \"count\" : 5,\n    \"avg\" : 8.2,\n    \"total\" : 41.0,\n    \"max\" : 9.0,\n    \"min\" : 8.0,\n    \"last\" : 9.0\n  }, {\n    \"count\" : 5,\n    \"avg\" : 46.0,\n    \"total\" : 230.0,\n    \"max\" : 59.0,\n    \"min\" : 0.0,\n    \"last\" : 0.0\n  }, {\n    \"count\" : 0,\n    \"avg\" : NaN,\n    \"total\" : NaN,\n    \"max\" : NaN,\n    \"min\" : NaN,\n    \"last\" : NaN\n  } ],\n  \"notices\" : [ ]\n}\n
    "},{"location":"api/graph/tick/","title":"Tick Labels","text":"

    The following tick (Y axis numeric labels) modes are supported:

    • decimal
    • binary
    • duration
    • off
    "},{"location":"api/graph/tick/#decimal","title":"Decimal","text":"

    This is the default mode. Y-axis tick labels will be formatted using the metric prefix to indicate the magnitude for values that are greater than one thousand or less than one.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n  &tick_labels=decimal\n

    Really large values will fallback to scientific notation, e.g.:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by,\n    1e180,:mul\n  &s=e-1w\n  &tick_labels=decimal\n

    "},{"location":"api/graph/tick/#binary","title":"Binary","text":"

    For values such as memory sizes it is sometimes more convenient to view the label using a power of 1024 rather than a power of 1000. If the tick label mode is set to binary, then the IEC binary prefix will be used.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n  &tick_labels=binary\n

    "},{"location":"api/graph/tick/#duration","title":"Duration","text":"

    Since 1.7.1.

    Useful for timers or percentiles that measure latency, provides ticks with time unit suffix.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,requestLatency,:eq,\n    nf.node,wii-node,:eq,\n    :and\n  &tick_labels=duration\n

    "},{"location":"api/graph/tick/#off","title":"Off","text":"

    For presentations or sharing it is sometimes useful to anonymize the chart. One way of doing that is to disable the Y-axis labels by setting the tick label mode to off.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    (,nf.cluster,),:by\n  &s=e-1w\n  &tick_labels=off\n

    "},{"location":"api/graph/tick/#offset-labels","title":"Offset Labels","text":"

    In situations where a graph has very small changes in value that generate a significant number of digits per tick, ticks may be labeled with offsets in order to fit the labels in the layout. A base value is displayed at the bottom of the axis and positive or negative offsets from the base displayed next to the ticks.

    For example, if the amount of disk space used varies by 1 byte occasionally, the ticks will be labeled in increments of +1.0.

    Note

    It is possible for queries spanning different data sources to display offset labels due to differing schemes used to encode floating point values.

    If offsets are not desirable, try adjusting the y axis bounds.

    "},{"location":"api/graph/time-shift/","title":"Time Shift","text":"

    A common use-case is to compare a given line with a shifted line to compare week-over-week or day-over-day.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    nf.cluster,nccp-silverlight,:eq,\n    :and,\n    :sum,\n    :dup,\n    1w,:offset\n

    The $(atlas.offset) variable can be used to show the offset in a custom legend:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq,\n    nf.cluster,nccp-silverlight,:eq,\n    :and,\n    :sum,\n    :dup,\n    1w,:offset,\n    :list,\n    (,$nf.cluster+(offset=$atlas.offset),:legend,\n    ),:each\n

    "},{"location":"api/graph/time-zone/","title":"Time Zones","text":"

    Examples for specifying the time zone:

    • Single Zone
    • Multi Zone
    • Daylight Savings Time
    "},{"location":"api/graph/time-zone/#single-zone","title":"Single Zone","text":"

    Most graphs will only show a single time zone. By default the zone is US/Pacific. To set to another zone such as UTC use the tz query parameter:

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq\n  &tz=UTC\n

    "},{"location":"api/graph/time-zone/#multi-zone","title":"Multi Zone","text":"

    The tz parameter can be specified multiple times in which case one X-axis will be shown per zone. Start and end times will be based on the first time zone listed.

    /api/v1/graph?\n  e=2012-01-01T00:00\n  &q=\n    name,sps,:eq\n  &s=e-2d\n  &tz=US/Eastern\n  &tz=US/Pacific\n  &tz=UTC\n

    "},{"location":"api/graph/time-zone/#daylight-savings-time","title":"Daylight Savings Time","text":"

    If using a time zone that changes for daylight savings time, then you will see duplicate or missing hours on the time axis labels during the transition period. For example, a duplicate hour:

    /api/v1/graph?\n  e=2015-11-01T08:00\n  &q=\n    name,sps,:eq\n  &s=e-12h\n  &tz=US/Pacific\n  &tz=UTC\n

    A missing hour:

    /api/v1/graph?\n  e=2015-03-08T08:00\n  &q=\n    name,sps,:eq\n  &s=e-12h\n  &tz=US/Pacific\n  &tz=UTC\n

    If looking at a longer time frame, then it can also throw off the alignment so ticks will not be on significant time boundaries, e.g.:

    /api/v1/graph?\n  e=2015-11-05T08:00\n  &q=\n    name,sps,:eq\n  &s=e-1w\n  &tz=US/Pacific\n  &tz=UTC\n

    "},{"location":"api/graph/vision/","title":"Color Blindness","text":"

    The vision parameter can be used to simulate different types of color blindness. Permitted values are:

    • normal
    • protanopia
    • protanomaly
    • deuteranopia
    • deuteranomaly
    • tritanopia
    • tritanomaly
    • achromatopsia
    • achromatomaly
    "},{"location":"api/graph/vision/#normal","title":"Normal","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=normal\n
    "},{"location":"api/graph/vision/#protanopia","title":"Protanopia","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=protanopia\n
    "},{"location":"api/graph/vision/#protanomaly","title":"Protanomaly","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=protanomaly\n
    "},{"location":"api/graph/vision/#deuteranopia","title":"Deuteranopia","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=deuteranopia\n
    "},{"location":"api/graph/vision/#deuteranomaly","title":"Deuteranomaly","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=deuteranomaly\n
    "},{"location":"api/graph/vision/#tritanopia","title":"Tritanopia","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=tritanopia\n
    "},{"location":"api/graph/vision/#tritanomaly","title":"Tritanomaly","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=tritanomaly\n
    "},{"location":"api/graph/vision/#achromatopsia","title":"Achromatopsia","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=achromatopsia\n
    "},{"location":"api/graph/vision/#achromatomaly","title":"Achromatomaly","text":"
    /api/v1/graph?\n  e=2012-01-01T09:00\n  &no_legend=1\n  &q=\n    1,1,1,1,1,1,1\n  &stack=1\n  &tz=UTC\n  &vision=achromatomaly\n
    "},{"location":"asl/","title":"Index","text":"

    The asl-finetuning.tsv file is a collection of ChatGPT training data manually extracted from the Markdown files in this section of the repo, that can be converted to a format suitable for helping ChatGPT learn about Atlas Stack Language.

    See the Fine-tuning section of the OpenAI documentation for more details.

    "},{"location":"asl/alerting-expressions/","title":"Alerting Expressions","text":"

    The stack language provides some basic techniques to convert an input line into a set of signals that can be used to trigger and visualize alert conditions. This section assumes a familiarity with the stack language and the alerting philosophy.

    "},{"location":"asl/alerting-expressions/#signal-line","title":"Signal Line","text":"

    A signal line is a time series that indicates whether or not a condition is true for a particular interval. They are modelled by having zero indicate false and non-zero, typically 1, indicating true. Alerting expressions map some input time series to a set of signal lines that indicate true when in a triggering state.

    "},{"location":"asl/alerting-expressions/#threshold-alerts","title":"Threshold Alerts","text":"

    To start we need an input metric. For this example the input will be a sample metric showing high CPU usage for a period:

    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and,\n:sum\n

    Let's say we want to trigger an alert when the CPU usage goes above 80%. To do that simply use the :gt operator and append 80,:gt to the query:

    The result is a signal line that is non-zero, typically 1, when in a triggering state and zero when everything is fine.

    "},{"location":"asl/alerting-expressions/#dampening","title":"Dampening","text":"

    Our threshold alert above will trigger if the CPU usage is ever recorded to be above the threshold. Alert conditions are often combined with a check for the number of occurrences. This is done by using the :rolling-count operator to get a line showing how many times the input signal has been true within a specified window and then applying a second threshold to the rolling count.

    InputRolling CountDampened Signal
    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and,\n:sum,\n80,:gt\n
    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and,\n:sum,\n80,:gt,\n5,:rolling-count\n
    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and,\n:sum,\n80,:gt,\n5,:rolling-count,\n4,:gt\n
    "},{"location":"asl/alerting-expressions/#visualization","title":"Visualization","text":"

    A signal line is useful to tell whether or not something is in a triggered state, but can be difficult for a person to follow. Alert expressions can be visualized by showing the input, threshold, and triggering state on the same graph.

    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and,\n:sum,\n80,:2over,\n:gt,\n:vspan,\n40,:alpha,\ntriggered,:legend,\n:rot,\ninput,:legend,\n:rot,\nthreshold,:legend,\n:rot\n
    "},{"location":"asl/alerting-expressions/#summary","title":"Summary","text":"

    You should now know the basics of crafting an alert expression using the stack language. Other topics that may be of interest:

    • Alerting Philosophy: overview of best practices associated with alerts.
    • Stack Language Tutorial: comprehensive list of available operators.
    • DES: double exponential smoothing. A technique for detecting anomalies in normally clean input signals where a precise threshold is unknown. For example, the requests per second hitting a service.
    "},{"location":"asl/alerting-philosophy/","title":"Alerting Philosophy","text":"

    It is recommended for all alerts to adhere to the following guidelines:

    1. Keep conditions simple.
    2. Alerts should be actionable.
    3. Check for measured failure on critical paths rather than a lack of success.
    4. Alerts should not have special cases for routine maintenance.
    5. Consider how the alert check can fail.
    "},{"location":"asl/alerting-philosophy/#keep-it-simple","title":"Keep It Simple","text":"

    When an alert triggers, it should be easy to understand why. Similarly, if an alert doesn't fire, then it should be easy to check and see what happened. The more complicated an alert condition becomes, the harder it is to understand and debug.

    It is recommended to keep alert rules as a simple expression with a threshold and number of occurrences. An example of this is the following rule:

    CPU Usage > 80% for at least 5 minutes\n

    Multiple signals should only be combined if it improves the effectiveness of the alert. For example, what is an appropriate threshold for the number of requests that have error responses? What happens to that threshold if your cluster auto-scales? It is more effective to define the threshold as a percentage of total requests:

    (Num Errors / Num Total) > 0.01 for at least 5 minutes\n

    In some cases, a low volume can make the percentages less meaningful and result in false positives. For example, if your daily traffic pattern follows a sine curve, then the troughs may not represent a meaningful error percentage. Another example might be during failover exercises, if traffic has been failed over to another cluster. One way to compensate for this is to check the failure rate and overall volume:

    Percentage of Failures > X AND Volume > Y\n

    As a general rule, bias towards simplicity. If you are creating more complex expressions, then stop and think about why that complexity is needed. Are there other signals available that are easier to use? Can the application be changed so that it reports metrics which make it easier to diagnose?

    "},{"location":"asl/alerting-philosophy/#actionable-alerts","title":"Actionable Alerts","text":"

    If an alert fires and sends a notification to users, someone should be motivated to investigate the problem. Alerts that are noisy or not actionable train people to ignore or filter out alert notifications.

    For cases where the response to an alert can be automated, such as terminating a bad instance, it shouldn't send out a notification unless there is a failure to perform the action. If you want a summary of cluster health, then use dashboards or reporting tools for this function; don't attempt to do this via alert notifications.

    Alerts should check something important. To setup effective alerts, you need to understand the application and have ways to detect failures for critical functionality. Avoid general system-type alerts that won't be investigated. For example, should you alert on high CPU usage? If you have done squeeze testing and you have information to indicate how CPU usage impacts the application, then it can be useful and it will provide a way to know a problem is coming before it impacts clients of the service. If you do not have this knowledge, then your alert may be under-tuned, leading to noisy notifications that may be ignored.

    "},{"location":"asl/alerting-philosophy/#check-for-measured-failure","title":"Check for Measured Failure","text":"

    It is better to check for failures rather than trying to trigger based on an absence of information or a reduction in the amount of success.

    "},{"location":"asl/alerting-philosophy/#absence-of-information","title":"Absence of Information","text":"

    A typical example of this is a process that runs over a longer time period. For example, suppose we have an application that updates a metadata cache once per day and it takes an hour to refresh. It is not recommended to send an event on refresh success and then configure alerts based on the absence of the success event. Design the signals so you have a clear way to understand what error conditions may be occurring and then alert if there is a problem.

    In this example, a better design would use a gauge that reports the loading time and a gauge that reports the age of the cache. You can then add alerts when the gauges for these error conditions exceed unacceptable thresholds.

    "},{"location":"asl/alerting-philosophy/#reduction-in-success","title":"Reduction in Success","text":"

    Let's say we have a server that is taking traffic and we want to know if users are experiencing problems. How should we go about this? It is often tempting to look for things like a drop in the number of successful requests, because this can be a generic catch-all for many types of problems.

    However, alerts of this sort are inherently noisy. How do you know what the number of requests should be? While there are various schemes for trying to predict the behavior, you will spend a lot of time tuning alerts of this nature to get them to the point where they are not too noisy, but they still catch real issues. Further, these schemes cannot differentiate between problems for the service and unrelated drops such as a client having problems and failing to make the request in the first place.

    If you're not going to investigate these alerts when they fire or invest in tuning and maintaining them, just avoid this type of alert altogether.

    A better approach is to alert on the number of failures you are seeing from a service. Thresholds can often be determined automatically by looking at the percent of all requests that are failures. For middle tier services, it is also likely that data from the clients can be used to see a percentage of failure from a client perspective instead of, or in addition to, the server side view.

    "},{"location":"asl/alerting-philosophy/#avoid-special-cases","title":"Avoid Special Cases","text":"

    Alerts shouldn't have to be tuned or suppressed during regular maintenance such as replacing instances or doing deployments. As a simple example, consider an alert on the rate of failures. The general assumption would be that a deployment should not be noticed by clients and therefore the alert is still relevant. Alerts that are actionable and look for measured failure tend to work well. If a new instance is coming up, a lack of activity will mean a lack of failures until traffic is being received. At that time if there are failures they should be noticed.

    "},{"location":"asl/alerting-philosophy/#startup-behavior","title":"Startup Behavior","text":"

    What about different behavior during startup? Consider some examples for an application that has a long initialization time (~20 minutes) before it can take traffic:

    • Discovery service state during initialization.
    • Healthcheck failures during initialization.
    • Performance may be different while starting. CPU usage is high while initializing but stabilizes and remains low during normal operation.

    For a discovery service like Eureka, the duration of the startup time shouldn't be an issue because the state clearly indicates if it is STARTING vs DOWN.

    If the healthcheck is used for a load balancer, then the decision to send traffic to instances should be fairly sensitive in order to minimize the impact to users. The bigger concern is the number of occurrences of healthcheck failures in a row, which can trigger automated actions like terminating an instance. When evaluating healthcheck failures, there are two distinct conditions to evaluate: non-200 responses and connection timeouts.

    The healthcheck logic should be tied to the Eureka heartbeat so that if the healthcheck is failing due to a non-200 response, the discovery state will be DOWN after initialization is complete. For the first condition, the alert should check for the number of occurrences of the DOWN state in the discovery service which will not trigger for the STARTING state used during application initialization.

    For the second condition, you would need to check for a disparity between the published discovery state and the healthcheck state:

    (DiscoveryStatus is UP) AND (Healthcheck != 200) for N minutes\n

    Note, unless you really need to do this it is probably better to just look at the healthcheck and have the num occurrences set to be longer than the typical startup time.

    For the CPU example, first reconsider whether general system check alerts are actually useful. Is it going to help you catch a real problem and be investigated when it triggers? If not, don't setup an alert on CPU and rely on alerts that check for failures on the critical path.

    If it is useful and you have squeeze testing results or other information so you know when a proxy metric like CPU actually indicates a problem, then you can configure it restricted with some signal that indicates the status. However, keep in mind that not all systems will allow complex expressions. For example, if you are auto-scaling will you be able to send the data such that it doesn't incorrectly skew the alarm? The more signals that are combined the harder it is to understand the alert and the more likely it is to fail in unexpected ways. Before adding more layers of duct tape think hard about the application and if you can change it to be easier to monitor and diagnose.

    "},{"location":"asl/alerting-philosophy/#deployments","title":"Deployments","text":"

    At Netflix, a common deployment model is red/black. In this model, a new auto-scaling group the same size as the existing one will be created, traffic will transition over, and eventually the old auto-scaling group (ASG) will be deleted. This can create false alarms if you haven't thought about the signals being used to fire alerts.

    The most common alerting problem that occurs during deployments is related to the use of averages. For example, the average request rate will drop in half if a new ASG comes up and you are aggregating across a cluster consisting of both old and new ASGs. If you follow the advice given earlier about crafting alerts based on a percentage of errors reported by clients of the application, then aggregating across clusters by sum usually won't be a problem. If the deployment is going well, then the overall failure rate seen by clients shouldn't be impacted.

    Another example of a deployment alerting problem is latency measurements. How can you tell the average latency across a cluster composed of new and old ASGs? Rather than trying to special case or exclude the new group of instances, you should define the alert signal based on the actual activity seen. If there is no activity within an ASG, then it will not impact the signal.

    Metrics libraries like Spectator send both a totalTime and count measurement separately to the backend. This allows the average to be computed using a simple sum aggregate with division:

    Sum(totalTime per instance in cluster) / Sum(count per instance in cluster)\n

    This calculation demonstrates how instances that are not receiving traffic will not contribute anything to the sums.

    "},{"location":"asl/alerting-philosophy/#think-about-failure","title":"Think About Failure","text":"

    An effective alert needs to be able to fire when there is a problem. However, when problems occur, it is possible that the problem will also impact the underlying data or mechanisms used to detect issues for the alert. It is worthwhile to spend time thinking about the ways in which your alerts can fail to detect events.

    "},{"location":"asl/alerting-philosophy/#how-can-signals-fail","title":"How Can Signals Fail?","text":"

    The simplest area to think about is what is collecting and reporting the data. For example, if data is being reported by the plugin running in the application, then it won't work if the application crashes or cannot start. It is recommended to have some basic alerts using a data pipeline that will fail independently from the application. At Netflix, this typically involves checking the following conditions:

    • The healthcheck is responding with 200. This signal indicates that a remote system was able to connect and query the application healthcheck. So the application is running and inbound traffic made it in.
    • The application is registered with Eureka. Eureka uses a heartbeat mechanism, so checking the registration tells you the application is running and it is able to successfully send the heartbeat request.

    The metric data for those signals comes from a separate poller application. If these succeed, then the application should be healthy enough that alerts triggered from data local to the instance should be working.

    "},{"location":"asl/alerting-philosophy/#alerting-scopes","title":"Alerting Scopes","text":"

    At Netflix, alert expressions for Atlas can be checked in three places:

    • Backend. Alerts checked against the main backend server.
    • Plugin. Alerts are checked by the plugin running on the instance.
    • Poller. Alerts are checked by a poller service that collects data about instances.

    In practice, for a given application, the alerting scopes look like:

    Alerting scopes can be used to provide some level of redundancy with different failure modes. For example, the failure rate could be checked against the server stats and the client stats. Further, it is recommended to check alerts as close as possible to where the data is initially measured and collected. In other words, it is better to check the alerts on the plugin or poller rather than against the backend. The advantages of doing this are:

    • Lower mean-time to detection (MTTD). Data going to the backend server has to be handled by several layers, which need to allow time for data from all nodes to arrive, time to index, etc. Alerts checked locally using the plugin or poller will get checked as data is being published and so they can trigger at the same time that data would hit the first step in the backend data pipeline.
    • More robust to failure. When there are problems with the monitoring backends, server side alerts won't work or may have incorrect or partial data. Alerts checked locally on the plugin are immune to all problems off the instance other than being able to forward to the alert server. If the Atlas plugin or the instance running it are having issues, then it is likely that problems for the local alert check would also impact publishing, so server side alerts are not likely to provide a better view. Also, keep in mind that for most middle-tier services, the alert can be checked on the instances that call the service and thus can still fire if the instance has crashed. High-level instance health can be verified by an alert checked on the poller.
    • Scales better as the amount of data and number of alerts increases. Many alerts, in particular if checked per node, require expensive queries to run on the backends. By checking alert using the plugin, the computation is spread out so each node is checking the alerts for that instance.

    So why not check all alerts on the client or poller? The primary disadvantages:

    • Client and poller scopes can only use data that is available at that location. For a client, that means only the data that is reported by the plugin on that instance. For the poller, it means only data about health checks, discovery, and system stats from SNMP.
    • Data cannot be aggregated across nodes for the cluster. This can make it harder to do things like outlier detection using a cluster-level aggregate as a baseline. However, keep in mind that for middle-tier services there is often an option to check on the plugin for the client.
    "},{"location":"asl/des/","title":"Double Exponential Smoothing","text":"

    Double exponential smoothing (DES) is a simple technique for generating a smooth trend line from another time series. This technique is often used to generate a dynamic threshold for alerting.

    Warning

    Alerts on dynamic thresholds should be expected to be noisy. They are looking for strange behavior rather than an actual problem causing impact. Make sure you will actually spend the time to tune and investigate the alarms before using this approach. See the alerting philosophy guide for more information on best practices.

    "},{"location":"asl/des/#tuning","title":"Tuning","text":"

    The :des operator takes 4 parameters:

    • An input time series
    • training - the number of intervals to use for warming up before generating an output
    • alpha - is a data smoothing factor
    • beta - is a trend smoothing factor

    Note

    For most use cases, the sliding variant of DES, :sdes, should be used instead.

    "},{"location":"asl/des/#training","title":"Training","text":"

    The training parameter defines how many intervals to allow the DES to warmup. In the graph below the gaps from the start of the chart to the smoothed lines reflects the training window used:

    Typically a training window of 10 has been sufficient as DES will adjust to the input fairly quickly. However, in some cases if there is a massive change in the input it can cause DES to oscillate, for example:

    "},{"location":"asl/des/#alpha","title":"Alpha","text":"

    Alpha is the data smoothing factor. A value of 1 means no smoothing. The closer the value gets to 0 the smoother the line should get. Example:

    "},{"location":"asl/des/#beta","title":"Beta","text":"

    Beta is a trend smoothing factor. Visually it is most apparent when alpha is small. Example with alpha = 0.01:

    "},{"location":"asl/des/#recommended-values","title":"Recommended Values","text":"

    Experimentally we have converged on 3 sets of values based on how quickly it should adjust to changing levels in the input signal.

    Helper Alpha Beta :des-fast 0.1 0.02 :des-slower 0.05 0.03 :des-slow 0.03 0.04

    Here is an example of how they behave for a sharp drop and recovery:

    For a more gradual drop:

    If the drop is smooth enough then DES can adjust without ever triggering.

    "},{"location":"asl/des/#alerting","title":"Alerting","text":"

    For alerting purposes the DES line will typically get multiplied by a fraction and then checked to see whether the input line drops below the DES value for a given interval.

    # Query to generate the input line\nnf.cluster,alerttest,:eq,\nname,requestsPerSecond,:eq,:and,\n:sum,\n\n# Create a copy on the stack\n:dup,\n\n# Apply a DES function to generate a prediction\n:des-fast,\n\n# Used to set a threshold. The prediction should\n# be roughly equal to the line, in this case the\n# threshold would be 85% of the prediction.\n0.85,:mul,\n\n# Create a boolean signal line that is 1\n# for datapoints where the actual value is\n# less than the prediction and 0 where it\n# is greater than or equal the prediction.\n# The 1 values are where the alert should\n# trigger.\n:lt,\n\n# Apply presentation details.\n:rot,$name,:legend,\n

    The vertical spans show when the expression would have triggered due to the input dropping below the DES line at 85%:

    "},{"location":"asl/des/#epic-macros","title":"Epic Macros","text":"

    There are two helper macros, des-epic-signal and des-epic-viz, that match the behavior of the previous epic DES alarms. The first generates a signal line for the alarm. The second creates a visualization to make it easier to see what is happening. Both take the following arguments:

    • line - input line
    • trainingSize - training size parameter for DES
    • alpha - alpha parameter for DES
    • beta - beta parameter for DES
    • maxPercent - percentage offset to use for the upper bound. Can be set to NaN to disable the upper bound check.
    • minPercent - percentage offset to use for the lower bound. Can be set to NaN to disable the lower bound check.
    • noise - a fixed offset that is the minimum difference between the signal and prediction that is required before the signal should trigger. This is primarily used to avoid false alarms where the percentage bound can become ineffective for routine noise during the troughs.

    Examples:

    nf.cluster,alerttest,:eq,\nname,requestsPerSecond,:eq,\n:and,\n:sum,\n10,0.1,0.02,0.15,0.15,10,:des-epic-viz\n

    Example with no lower bound:

    nf.cluster,alerttest,:eq,\nname,requestsPerSecond,:eq,\n:and,\n:sum,\n10,0.1,0.02,0.15,NaN,10,:des-epic-viz\n
    "},{"location":"asl/tutorial/","title":"Tutorial","text":"

    Atlas Stack Language is designed to be a stable method of representing complex data queries in a URL-friendly format. It is loosely based on the RPN expressions supported by Tobias Oetiker's rrdtool. The following is an example of a stack language expression:

    nf.cluster,discovery,:eq,(,nf.zone,),:by

    This example pushes two strings nf.cluster and discovery onto the stack and then executes the command :eq. The equal command pops two strings from the stack and pushes a query object onto the stack. The behavior can be described by the stack effect String:key String:value \u2013 Query. We then push a list of tag keys to the stack and execute the command :by to group the results.

    "},{"location":"asl/tutorial/#parts","title":"Parts","text":"

    There are only four reserved symbols used for structuring the expression: ,:()

    1. Commas separate items on the stack. So a,b puts two strings on the stack with values \"a\" and \"b\".
    2. Colon is used to prefix operations. If the first character is a colon the item will be treated as a command to run. For example, a,:dup, will push \"a\" on the stack and then execute the duplicate operation.
    3. Parenthesis are used to indicate the start and end of a list. The expression (,) puts an empty list on the stack. Commands inside of a list will not be executed unless the list is passed to the call command. For example, (,:dup,) will push a list with a single string value of \":dup\" on to the stack.
    "},{"location":"asl/tutorial/#data-model","title":"Data Model","text":"

    The stack language is primarily used for representing expressions over tagged time series data. A tag is a string key value pair used to describe a measurement. Atlas requires at least one tag with a key of name. Example tags represented as a JSON map:

    {\n  \"name\":       \"jvm.gc.pause\",\n  \"cause\":      \"Allocation_Failure\",\n  \"statistic\":  \"count\",\n  \"nf.app\":     \"www\",\n  \"nf.cluster\": \"www-main\",\n  \"nf.asg\":     \"www-main-v001\",\n  \"nf.stack\":   \"main\",\n  \"nf.node\":    \"i-01\",\n  \"nf.region\":  \"us-east-1\",\n  \"nf.zone\":    \"us-east-1a\"\n}\n

    Typically, tags should be dimensions that allow you to use the name as a pivot and other tags to drill down into the data. The tag keys are similar to columns in a traditional table, however, it is important to note that not all time series will have the same set of tag keys.

    The tags are used to identify a time series, which conceptually is a set of timestamp value pairs. Here is a simplified data set shown as a table:

    name app node values cpuUsage www i-01 [(05:00, 33.0), (05:01, 31.0)] cpuUsage www i-02 [(05:00, 20.0), (05:01, 37.0)] cpuUsage db i-03 [(05:00, 57.0), (05:01, 62.0)] diskUsage www i-01 [(05:00, 9.0), (05:01, 9.0)] diskUsage www i-02 [(05:00, 7.0), (05:01, 8.0)] requestRate www [(05:00, 33.0), (05:01, 31.0)]

    The table above will be used for the examples in later sections.

    "},{"location":"asl/tutorial/#simple-expressions","title":"Simple Expressions","text":"

    All expressions generally have four parts:

    1. Choosing: selects a set of time series.
    2. Aggregation: defines how to combine the selected time series.
    3. Math: manipulate the time series values or combine aggregated results with binary operations.
    4. Presentation: adjust how the data is presented in a chart.
    "},{"location":"asl/tutorial/#choosing","title":"Choosing","text":"

    The \"choosing\" or predicate section is used to select a set of time series. The primary predicate operators are :eq and :and.

    Sample query to select all time series where the key node is equal to i-01:

    node,i-01,:eq\n

    If you are familiar with SQL and assume that tag keys are column names, then this would be equivalent to:

    select * from time_series where node = 'i-01';\n

    Using the example data set this query would return the following subset:

    name app node values cpuUsage www i-01 [(05:00, 33.0), (05:01, 31.0)] diskUsage www i-01 [(05:00, 9.0), (05:01, 9.0)]

    To get just the cpu usage for that node, use :and:

    node,i-01,:eq,\nname,cpuUsage,:eq,\n:and\n

    This would result in:

    name app node values cpuUsage www i-01 [(05:00, 33.0), (05:01, 31.0)]"},{"location":"asl/tutorial/#aggregation","title":"Aggregation","text":"

    An aggregation function maps a set of time series that matched the predicate to a single time series. Atlas supports four aggregate functions: sum, min, max, and count. If no aggregate is specified on an expression, then sum will be used implicitly.

    Using the example data set, these two expressions would be equivalent:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and\n
    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:sum\n

    And would result in a single output time series:

    name app values cpuUsage www [(05:00, 53.0), (05:01, 68.0)]

    Note that the node is not present in the output. The set of tags on the output will be ones with exact matches in the predicate clause or explicitly listed in the group by.

    If you wanted the max cpu for the application, then you would write:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:max\n

    What if we want the average? The count aggregate is used to determine how many time series had a value for a given time. To get the average we divide the sum by the count.

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:dup,\n:sum,\n:swap,\n:count,\n:div\n

    There is a helper macro :avg that will do this for you, so you can write:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:avg\n
    "},{"location":"asl/tutorial/#group-by","title":"Group By","text":"

    In many cases we want to group the results that were selected and return one aggregate per group. As an example suppose I want to see maximum cpu usage by application:

    name,cpuUsage,:eq,\n:max,\n(,app,),:by\n

    Using the example data set, this would result in two output time series:

    name app values cpuUsage www [(05:00, 33.0), (05:01, 37.0)] cpuUsage db [(05:00, 57.0), (05:01, 62.0)]"},{"location":"asl/tutorial/#math","title":"Math","text":"

    Once you have a set of lines, it can be useful to manipulate them. The supported operations generally fall into two categories: unary operations to alter a single time series and binary operations that combine two time series.

    Examples of unary operations are negate and absolute value. To apply the absolute value:

    app,web,:eq,\nname,cpu,:eq,\n:and,\n:sum,\n:abs\n

    Multiple operations can be applied, for example, negating the line then applying the absolute value:

    app,web,:eq,\nname,cpu,:eq,\n:and,\n:sum,\n:neg,\n:abs\n

    Common binary operations are add, subtract, multiply, and divide. The aggregation section has an example of using divide to compute the average.

    "},{"location":"asl/tutorial/#presentation","title":"Presentation","text":"

    Once you have a final expression, you can apply presentation settings to alter how a time series is displayed in the chart. One of the most common examples is setting the label to use for the legend:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:avg,\naverage+cpu+usage,:legend\n

    You can also use tag keys as variables in the legend text, for example, setting the legend to the application:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:avg,\n(,app,),:by,\n$(app),:legend\n

    It is also common to adjust how the lines are shown. For example, to stack each of the lines we can use the :stack command to adjust the line style:

    app,www,:eq,\nname,cpuUsage,:eq,\n:and,\n:avg,\n(,app,),:by,\n:stack,\n$(app),:legend\n
    "},{"location":"asl/ref/-rot/","title":"-rot","text":"Input Stack:ba... \u21e8 Output Stack:a...b

    Rotate the stack so that the item at the top is now at the bottom.

    Example:

    a,b,c,d,:-rot\n
    PosInputOutput 0 d c 1 c b 2 b a 3 a d"},{"location":"asl/ref/2over/","title":"2over","text":"Input Stack:ba \u21e8 Output Stack:baba

    Shorthand equivalent to writing: :over,:over

    Example:

    a,b,:2over\n
    PosInputOutput 0 b b 1 a a 2 b 3 a"},{"location":"asl/ref/abs/","title":"abs","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute a new time series where each interval has the absolute value of the input time series.

    Examples:

    064-64
    0,:abs\n
    64,:abs\n
    -64,:abs\n
    "},{"location":"asl/ref/add/","title":"add","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 + ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a addNaN b) where a and b are the corresponding intervals in the input time series. Sample:

    :add 3.0 0.0 1.0 1.0 NaN Input 1 1.0 0.0 1.0 1.0 NaN Input 2 2.0 0.0 0.0 NaN NaN

    Use the fadd operator to get strict floating point behavior.

    Examples

    Example adding a constant:

    BeforeAfter
    name,sps,:eq,\n30e3\n
    name,sps,:eq,\n30e3,:add\n

    Example adding two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:add\n
    "},{"location":"asl/ref/all/","title":"all","text":"

    Warning

    Deprecated: use :by instead. This operation is primarily intended for debugging and results can be confusing unless you have detailed understanding of Atlas internals.

    Input Stack:Query \u21e8 Output Stack:DataExpr

    Avoid aggregation and output all time series that match the query.

    "},{"location":"asl/ref/alpha/","title":"alpha","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Set the alpha value for the colors on the line. The value should be a two digit hex number where 00 is transparent and ff is opaque. This setting will be ignored if the color setting is used for the same line.

    BeforeAfter
    name,sps,:eq,\n:sum,\n:stack\n
    name,sps,:eq,\n:sum,\n:stack,\n40,:alpha\n
    BeforeAfter
    name,sps,:eq,\n:sum,\n:stack,\nf00,:color\n
    name,sps,:eq,\n:sum,\n:stack,\nf00,:color,\n40,:alpha\n
    "},{"location":"asl/ref/and/","title":"and","text":"

    There are two variants of the :and operator.

    "},{"location":"asl/ref/and/#choosing","title":"Choosing","text":"Input Stack:q2: Queryq1: Query \u21e8 Output Stack:(q1 AND q2): Query

    This first variant is used for choosing the set of time series to operate on. It is a binary operator that matches if both of the sub-queries match. For example, consider the following query:

    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:and\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/and/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 AND ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a AND b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a AND b 00:01 0.0 0.0 0.0 00:01 0.0 1.0 0.0 00:02 1.0 0.0 0.0 00:03 1.0 1.0 1.0 00:04 0.5 1.7 1.0

    The result will be a signal time series that will be 1.0 for all intervals where the corresponding values of a and b are both non-zero. Example:

    BeforeAfter
    minuteOfDay,:time,\n:dup,\n300,:gt,\n:swap,\n310,:lt\n
    minuteOfDay,:time,\n:dup,\n300,:gt,\n:swap,\n310,:lt,\n:and\n
    "},{"location":"asl/ref/area/","title":"area","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Change the line style to be area. In this mode the line will be filled to 0 on the Y-axis.

    See the line style examples page for more information.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:area\n
    "},{"location":"asl/ref/as/","title":"as","text":"Input Stack:replacement: Stringoriginal: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Map a tag key name to an alternate name. This can be useful for cases where it is desirable to perform a binary math operation, but the two sides use different tag keys for the same concept. The common IPC metrics are an example where it might be desirable to compare RPS for servers and their clients. The server side RPS would group by nf.app while the client side view would group by ipc.server.app.

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nnf.cluster,c,:as,\n$c,:legend\n
    "},{"location":"asl/ref/avg/","title":"avg","text":"

    Average or mean aggregation operator. There are two variants of the :avg operator.

    "},{"location":"asl/ref/avg/#aggregation","title":"Aggregation","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    A helper method that computes the average or mean from one or more time series using the count aggregate to determine how many time series have data at an interval and dividing the sum of the values by the count. This avoids issues where one or more time series are missing data at a specific time resulting in an artificially low average. E.g. the expression:

    name,ssCpuUser,:eq,\n:avg\n

    when matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The values from the corresponding intervals will be aggregated. For the first interval using the sample data above the values are 1.0, 8.0, and 1.0. Each value other than NaN contributes one to the average. This leads to a final result of:

    NameData ssCpuUser [3.33, 3.66, 4.0]

    The only tags for the aggregated result are those that are matched exactly (:eq clause) as part of the choosing criteria or are included in a group by.

    "},{"location":"asl/ref/avg/#math","title":"Math","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute the average of all the time series from the input expression. This is typically used when there is a need to use some other aggregation for the grouping. Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:max,\n(,nf.cluster,),:by,\n:avg\n
    "},{"location":"asl/ref/axis/","title":"axis","text":"Input Stack:IntTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Specify which Y-axis to use for the line. The value specified is the axis number and should be an integer in the range 0 to 4 inclusive.

    Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n42\n
    name,sps,:eq,\n:sum,\n42,1,:axis\n
    "},{"location":"asl/ref/bottomk-others-avg/","title":"bottomk-others-avg","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the smallest value for the specified summary statistic and computes an average aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:bottomk-others-avg\n
    "},{"location":"asl/ref/bottomk-others-max/","title":"bottomk-others-max","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the smallest value for the specified summary statistic and computes a max aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:bottomk-others-max\n
    "},{"location":"asl/ref/bottomk-others-min/","title":"bottomk-others-min","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the smallest value for the specified summary statistic and computes a min aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:bottomk-others-min\n
    "},{"location":"asl/ref/bottomk-others-sum/","title":"bottomk-others-sum","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the smallest value for the specified summary statistic and computes a sum aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:bottomk-others-sum\n
    "},{"location":"asl/ref/bottomk/","title":"bottomk","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the smallest value for the specified summary statistic. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:bottomk\n

    In some cases it can be useful to see an aggregate summary of the other time series that were not part of the bottom set. This can be accomplished using the :bottomk-others-$(aggr) operators. For more details see:

    • :bottomk-others-avg
    • :bottomk-others-max
    • :bottomk-others-min
    • :bottomk-others-sum
    "},{"location":"asl/ref/by/","title":"by","text":"

    Group by operator. There are two variants of the :by operator.

    "},{"location":"asl/ref/by/#aggregation","title":"Aggregation","text":"Input Stack:keys: List[String]AggregationFunction \u21e8 Output Stack:DataExpr

    Groups the matching time series by a set of keys and applies an aggregation to matches of the group.

    name,ssCpu,:re,\n(,name,),:by\n

    When matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The aggregation function will be applied independently for each group. In this example above there are two matching values for the group by key name. This leads to a final result of:

    NameData ssCpuSystem [9.0, 11.0, 13.0] ssCpuUser [10.0, 11.0, 8.0]

    The name tag is included in the result set since it is used for the grouping.

    "},{"location":"asl/ref/by/#math","title":"Math","text":"Input Stack:keys: List[String]TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Groups the time series from the input expression by a set of keys and applies an aggregation to matches of the group. The keys used for this grouping must be a subset of keys from the initial group by clause. Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n(,nf.cluster,nf.node,),:by\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,nf.node,),:by,\n:count,\n(,nf.cluster,),:by\n
    "},{"location":"asl/ref/call/","title":"call","text":"Input Stack:?List \u21e8 Output Stack:?

    Pops a list off the stack and executes it as a program.

    Example:

    (,a,),:call\n
    Pos Input Output 0 List(a) a"},{"location":"asl/ref/cf-avg/","title":"cf-avg","text":"Input Stack:AggregationFunction \u21e8 Output Stack:AggregationFunction

    Force the consolidation function to be average.

    "},{"location":"asl/ref/cf-max/","title":"cf-max","text":"Input Stack:AggregationFunction \u21e8 Output Stack:AggregationFunction

    Force the consolidation function to be max.

    "},{"location":"asl/ref/cf-min/","title":"cf-min","text":"Input Stack:AggregationFunction \u21e8 Output Stack:AggregationFunction

    Force the consolidation function to be min.

    "},{"location":"asl/ref/cf-sum/","title":"cf-sum","text":"Input Stack:AggregationFunction \u21e8 Output Stack:AggregationFunction

    Force the consolidation function to be sum.

    "},{"location":"asl/ref/cg/","title":"cg","text":"Input Stack:keys: List[String]Expr \u21e8 Output Stack:Expr

    Recursively add a list of keys to group by expressions. This can be useful for tooling that needs to adjust existing expressions to include keys in the grouping.

    BeforeAfter
    name,sps,:eq,\n(,nf.app,),:by\n
    name,sps,:eq,\n(,nf.app,),:by,\n(,nf.cluster,),:cg\n
    "},{"location":"asl/ref/clamp-max/","title":"clamp-max","text":"Input Stack:DoubleTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Restricts the maximum value of the output time series to the specified value. Values from the input time series that are less than or equal to the maximum will not be changed.

    A common use-case is to allow for auto-scaled axis up to a specified bound. The axis parameters for controlling the axis bounds have the following limitations:

    • They apply to everything on the axis and cannot be targeted to a specific line.
    • Are either absolute or set based on the data. For data with occasional spikes this can hide important details.

    Consider the following graph:

    The spike makes it difficult to make out any detail for other times. One option to handle this is to use an alternate axis scale such as logarithmic that gives a higher visual weight to the smaller values. However, it is often easier for a user to reason about a linear scale, in particular, for times when there is no spike in the graph window. If there is a known max reasonable value, then the :clamp-max operator can be used to restrict the line if and only if it exceeds the designated max. For example, if we limit the graph above to 25:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n60e3,:clamp-max\n
    "},{"location":"asl/ref/clamp-min/","title":"clamp-min","text":"Input Stack:DoubleTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Restricts the minimum value of the output time series to the specified value. Values from the input time series that are greater than or equal to the minimum will not be changed. A common use-case is to allow for auto-scaled axis up to a specified bound. For more details see :clamp-max.

    Example:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n200e3,:clamp-min\n
    "},{"location":"asl/ref/clear/","title":"clear","text":"Input Stack:... \u21e8 Output Stack:

    Remove all items from the stack.

    Example:

    a,b,c,:clear\n
    PosInputOutput 0 c 1 b 2 a"},{"location":"asl/ref/color/","title":"color","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Set the color for the line. The value should be one of:

    • Hex triplet, e.g. f00 is red.
    • 6 digit hex RGB, e.g. ff0000 is red.
    • 8 digit hex ARGB, e.g. ffff0000 is red. The first byte is the alpha setting to use with the color.

    For queries with multiple time series, color palettes are available to automatically assign different colors to the various series. See Color Palettes.

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\nff0000,:color\n
    "},{"location":"asl/ref/const/","title":"const","text":"Input Stack:Double \u21e8 Output Stack:TimeSeriesExpr

    Generates a line where each datapoint is a constant value. Any double value that is left on the stack will get implicitly converted to a constant line, so this operator is typically not used explicitly.

    BeforeAfter
    42\n
    42,:const\n
    "},{"location":"asl/ref/contains/","title":"contains","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:Query

    Select time series where the value for a key includes the specified substring. For example, consider the following query:

    name,Cpu,:contains\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/count/","title":"count","text":"

    Count aggregation operator. There are two variants of the :count operator.

    "},{"location":"asl/ref/count/#aggregation","title":"Aggregation","text":"Input Stack:Query \u21e8 Output Stack:AggregationFunction

    Compute the number of time series that match the query and have a value for a given interval.

    name,ssCpuUser,:eq,\n:count\n

    When matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The values from the corresponding intervals will be aggregated. For the first interval using the sample data above the values are 1.0, 8.0, and 1.0. Each value other than NaN contributes one to the count. This leads to a final result of:

    NameData ssCpuUser [3.0, 3.0, 2.0]

    The only tags for the aggregated result are those that are matched exactly (:eq clause) as part of the choosing criteria or are included in a group by.

    "},{"location":"asl/ref/count/#math","title":"Math","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute the number of time series from the input expression that have a value for a given interval. Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:count\n
    "},{"location":"asl/ref/cq/","title":"cq","text":"Input Stack:QueryExpr \u21e8 Output Stack:Expr

    Recursively AND a common query to all queries in an expression. If the first parameter is not an expression, then it will not be modified.

    Example:

    name,ssCpuUser,:eq,\nname,DiscoveryStatus_UP,:eq,\n:mul,\nnf.app,alerttest,:eq,\n:cq\n
    BeforeAfter
    name,ssCpuUser,:eq,\nname,DiscoveryStatus_UP,:eq,\n:mul,\nnf.app,alerttest,:eq\n
    name,ssCpuUser,:eq,\nname,DiscoveryStatus_UP,:eq,\n:mul,\nnf.app,alerttest,:eq,\n:cq\n
    BeforeAfter
    42,nf.app,alerttest,:eq\n
    42,nf.app,alerttest,:eq,\n:cq\n
    "},{"location":"asl/ref/decode/","title":"decode","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Note

    It is recommended to avoid using special symbols or trying to encode structural information into tag values. This feature should be used sparingly and with great care to ensure it will not result in a combinatorial explosion.

    Perform decoding of the legend strings. Generally data going into Atlas is restricted to simple ASCII characters that are easy to use as part of a URI. Most commonly the clients will convert unsupported characters to an _. In some cases it is desirable to be able to reverse that for the purposes of presentation.

    • none: this is the default. It will not modify the legend string.
    • hex: perform a hex decoding of the legend string. This is similar to url encoding except that the _ character is used instead of % to indicate the start of an encoded symbol. The decoding is lenient, if the characters following the _ are not valid hexadecimal digits then it will just copy those characters without modification.

    Since: 1.5

    Example:

    Hex to ASCII
    1,one_21_25_26_3F,:legend,\nhex,:decode\n
    "},{"location":"asl/ref/delay/","title":"delay","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Delays the values by the window size. This is similar to the :offset operator except that it can be applied to any input line instead of just changing the time window fetched with a DataExpr. Short delays can be useful for alerting to detect changes in slightly shifted trend lines.

    Since: 1.6

    BeforeAfterCombined
    name,requestsPerSecond,:eq,\n:sum\n
    name,requestsPerSecond,:eq,\n:sum,\n5,:delay\n
    name,requestsPerSecond,:eq,\n:sum,\n:dup,\n5,:delay\n
    "},{"location":"asl/ref/depth/","title":"depth","text":"Input Stack:... \u21e8 Output Stack:Int...

    Push the depth of the stack.

    Since: 1.5.0

    Examples:

    ,:depth\n
    PosInputOutput 0 0
    a,:depth\n
    PosInputOutput 0 a 1 1 a
    a,b,:depth\n
    PosInputOutput 0 b 2 1 a b 2 a"},{"location":"asl/ref/derivative/","title":"derivative","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Opposite of :integral. Computes the rate of change per step of the input time series.

    DerivativeIntegralIntegral Then Derivative
    1,:derivative\n
    1,:integral\n
    1,:integral,\n:derivative\n
    "},{"location":"asl/ref/des-epic-signal/","title":"des-epic-signal","text":"Input Stack:noise: DoubleminPercent: DoublemaxPercent: Doublebeta: Doublealpha: Doubletraining: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for configuring DES in a manner compatible with legacy epic alerts. For more information see the epic macros section of the DES page.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n10,0.1,0.5,0.2,0.2,4,:des-epic-signal\n
    "},{"location":"asl/ref/des-epic-viz/","title":"des-epic-viz","text":"Input Stack:noise: DoubleminPercent: DoublemaxPercent: Doublebeta: Doublealpha: Doubletraining: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for configuring DES in a manner compatible with legacy Epic alerts. For more information see the epic macros section of the DES page.

    Example
    name,sps,:eq,\n:sum,\n10,0.1,0.5,0.2,0.2,4,:des-epic-viz\n
    "},{"location":"asl/ref/des-fast/","title":"des-fast","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing DES using settings to quickly adjust to the input line. See recommended values for more information. For most use-cases the sliding DES variant :sdes-fast should be used instead.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:des-fast\n
    "},{"location":"asl/ref/des-simple/","title":"des-simple","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing DES using default values.

    Warning

    The values used by this operation are prone to wild oscillations. See recommended values for better options.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:des-simple\n
    "},{"location":"asl/ref/des-slow/","title":"des-slow","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing DES using settings to slowly adjust to the input line. See recommended values for more information. For most use-cases the sliding DES variant :sdes-slow should be used instead.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:des-slow\n
    "},{"location":"asl/ref/des-slower/","title":"des-slower","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing DES using settings to slowly adjust to the input line. See recommended values for more information. For most use-cases the sliding DES variant :sdes-slower should be used instead.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:des-slower\n
    "},{"location":"asl/ref/des/","title":"des","text":"Input Stack:beta: Doublealpha: Doubletraining: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Double exponential smoothing. For most use-cases sliding DES should be used instead to ensure a deterministic prediction.

    BeforeAfter
    name,requestsPerSecond,:eq,\n:sum\n
    name,requestsPerSecond,:eq,\n:sum,\n5,0.1,0.5,:des\n
    "},{"location":"asl/ref/dist-avg/","title":"dist-avg","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    Compute the average recorded value for timers and distribution summaries. This is calculated by dividing the total amount recorded by the number of recorded values.

    For Timer and Distribution Summary metrics, the totalTime (timers) / totalAmount (distributions) and count are collected each time a measurement is taken. If this technique were applied to a request latency metric, then you would have the average latency per request for an arbitrary grouping. These types of metrics have an explicit count based on activity. To get an average per measurement manually:

    statistic,totalTime,:eq,\n:sum,\nstatistic,count,:eq,\n:sum,\n:div\n

    This expression can be bound to a query using the :cq (common query) operator:

    statistic,totalTime,:eq,\n:sum,\nstatistic,count,:eq,\n:sum,\n:div,\nnf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:cq\n

    Using the :dist-avg function reduces the query to:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-avg\n

    To compute the average by group, apply the group after the :dist-avg function:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-avg,\n(,nf.asg,),:by\n
    BeforeAfter
    name,playback.startLatency,:eq\n
    name,playback.startLatency,:eq,\n:dist-avg\n
    "},{"location":"asl/ref/dist-max/","title":"dist-max","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    Compute the maximum recorded value for timers and distribution summaries. This is a helper for aggregating by the max of the max statistic for the meter.

    A manual query would look like:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\nstatistic,max,:eq,\n:and,\n:max\n

    Using :dist-max the query is reduced to:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-max\n
    BeforeAfter
    name,playback.startLatency,:eq\n
    name,playback.startLatency,:eq,\n:dist-max\n
    "},{"location":"asl/ref/dist-stddev/","title":"dist-stddev","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    Compute the standard deviation for timers and distribution summaries.

    A manual query would look like:

    statistic,count,:eq,\n:sum,\nstatistic,totalOfSquares,:eq,\n:sum,\n:mul,\nstatistic,totalTime,:eq,\n:sum,\n:dup,\n:mul,\n:sub,\nstatistic,count,:eq,\n:sum,\n:dup,\n:mul,\n:div,\n:sqrt,\nnf.cluster,foo,:eq,\n name,http.req.latency,:eq,\n:and,\n:cq\n

    This is much simpler using the :dist-stddev function:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-stddev\n
    BeforeAfter
    name,playback.startLatency,:eq\n
    name,playback.startLatency,:eq,\n:dist-stddev\n
    "},{"location":"asl/ref/div/","title":"div","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 / ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a / b) where a and b are the corresponding intervals in the input time series. If a and b are 0, then 0 will be returned for the interval. If only b is 0, then NaN will be returned as the value for the interval. Sample data:

    :div 0.5 0.0 NaN NaN NaN Input 1 1.0 0.0 1.0 1.0 NaN Input 2 2.0 0.0 0.0 NaN NaN

    Use the fdiv operator to get strict floating point behavior.

    Example dividing a constant:

    BeforeAfter
    name,sps,:eq,\n42\n
    name,sps,:eq,\n42,:div\n

    Example dividing two series:

    BeforeAfter
    name,sps,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,sps,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:div\n
    "},{"location":"asl/ref/drop/","title":"drop","text":"Input Stack:a \u21e8 Output Stack:

    Remove the item on the top of the stack.

    Example:

    a,b,c,:drop\n
    PosInputOutput 0 c b 1 b a 2 a
    :drop\n

    Warning

    Throws an exception due to an empty stack.

    "},{"location":"asl/ref/dup/","title":"dup","text":"Input Stack:a: ? \u21e8 Output Stack:a: ?a: ?

    Duplicates the item on the top of the stack.

    Example:

    BeforeAfter
    minuteOfDay,:time\n
    minuteOfDay,:time,\n:dup\n
    "},{"location":"asl/ref/each/","title":"each","text":"Input Stack:function: Listitems: List \u21e8 Output Stack:function(items[N-1])...function(items[0])

    Pops a list off the stack and executes it as a program.

    Example:

    (,a,b,),(,:dup,\n),:each\n
    PosInputOutput 0 List(:dup) a 1 List(a, b) a 2 b 3 b"},{"location":"asl/ref/ends/","title":"ends","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:Query

    Select time series where the value for a key has the specified suffix. For example, consider the following query:

    name,ssCpuUser,:ends\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/eq/","title":"eq","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k == v): Query

    Select time series that have a specified value for a key. For example, consider the following query:

    name,ssCpuUser,:eq\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/eureka-avg/","title":"eureka-avg","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    A helper to compute an average using the number of instances in the UP state based on the discovery.status metric as the denominator. The common infrastructure tags will be used to restrict the scope for the denominator. This operator should be used if the numerator is based on incoming traffic that is routed via the Eureka service and the goal is to compute an average per node receiving traffic.

    name,sps,:eq,\nnf.app,nccp,:eq,\n:and,\n:eureka-avg\n
    "},{"location":"asl/ref/fadd/","title":"fadd","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 + ts2): TimeSeriesExpr

    Floating point addition operator. Compute a new time series where each interval has the value (a + b) where a and b are the corresponding intervals in the input time series.

    :fadd 3.0 0.0 1.0 NaN NaN Input 1 2.0 0.0 1.0 1.0 NaN Input 2 1.0 0.0 0.0 NaN NaN

    Note in many cases NaN will appear in data, e.g., if a node was brought up and started reporting in the middle of the time window for the graph. This can lead to confusing behavior if added to a line that does have data as the result will be NaN. Use the add operator to treat NaN values as zero for combining with other time series.

    Example adding a constant:

    BeforeAfter
    name,sps,:eq,\n30e3\n
    name,sps,:eq,\n30e3,:fadd\n

    Example adding two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:fadd\n
    "},{"location":"asl/ref/false/","title":"false","text":"Input Stack: \u21e8 Output Stack:Query

    Query expression that will not match any input time series. See also :true.

    "},{"location":"asl/ref/fcall/","title":"fcall","text":"Input Stack:String... \u21e8 Output Stack:?

    Shorthand equivalent to writing: :get,:call

    Example:

    duplicate,(,:dup,\n),:set,\na,duplicate,:fcall\n
    PosInputOutput 0 duplicate a 1 a a"},{"location":"asl/ref/fdiv/","title":"fdiv","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 / ts2): TimeSeriesExpr

    Floating point division operator. Compute a new time series where each interval has the value (a / b) where a and b are the corresponding intervals in the input time series.

    :fdiv 2.0 NaN Inf NaN NaN Input 1 2.0 0.0 1.0 1.0 NaN Input 2 1.0 0.0 0.0 NaN NaN

    Note in many cases NaN will appear in data, e.g., if a node was brought up and started reporting in the middle of the time window for the graph. Zero divided by zero can also occur due to lack of activity in some windows. Unless you really need strict floating point behavior, use the div operator to get behavior more appropriate for graphs.

    Example dividing a constant:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n1024,:fdiv\n

    Example dividing two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:fdiv\n
    "},{"location":"asl/ref/filter/","title":"filter","text":"Input Stack:TimeSeriesExprTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Filters the results of a grouped expression by another expression. The filter expression is a set of signal time series indicating if the corresponding time series from the original expression should be shown. Simple example that suppresses all lines:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n0,:filter\n

    Filtering is most commonly performed using the summary statistics for the original expression. For example, to show only the lines that have an average value across the query window greater than 5k and less than 20k:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:dup,\navg,:stat,\n5e3,:gt,\n:over,\navg,:stat,\n20e3,:lt,\n:and,\n:filter\n

    There are helpers, :stat-$(name), to express this common pattern more easily for filters. They act as place holders for the specified statistic on the input time series. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria. See the :stat operator for more details on available statistics. For this example, :stat-avg would be used:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-avg,\n5e3,:gt,\n:stat-avg,\n20e3,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/fmul/","title":"fmul","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 * ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a * b) where a and b are the corresponding intervals in the input time series.

    Example multiplying a constant:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n1024,:fmul\n

    Example multiplying two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:fmul\n
    "},{"location":"asl/ref/format/","title":"format","text":"Input Stack:args: Listpattern: String \u21e8 Output Stack:str: String

    Format a string using a printf style pattern.

    Example:

    foo%s,(,bar,),:format\n
    PosInputOutput 0 List(bar) foobar 1 foo%s"},{"location":"asl/ref/freeze/","title":"freeze","text":"Input Stack:... \u21e8 Output Stack:

    Freeze removes all data from the stack and pushes it to a separate frozen stack that cannot be modified other than to push additional items using the freeze operation. The final stack at the end of the execution will include the frozen contents along with anything that is on the normal stack.

    This operation is useful for isolating common parts of the stack while still allowing tooling to manipulate the main stack using concatenative rewrite operations. The most common example of this is the :cq operation used to apply a common query to graph expressions. For a concrete example, suppose you want to have an overlay expression showing network errors on a switch that you want to add in to graphs on a dashboard. The dashboard allows drilling into the graphs by selecting a particular cluster. To make this work the dashboard appends a query rewrite to the expression like:

    ,:list,(,nf.cluster,{{ selected_cluster }},:eq,:cq,),:each\n

    This :list operator will apply to everything on the stack. However, this is problematic because the cluster restriction will break the overlay query. Using the freeze operator the overlay expression can be isolated from the main stack. So the final expression would look something like:

    # Query that should be used as is and not modified further\nname,networkErrors,:eq,:sum,50,:gt,:vspan,40,:alpha,\n:freeze,\n\n# Normal contents of the stack\nname,ssCpuUser,:eq,:avg,1,:axis,\nname,loadavg1,:eq,:avg,2,:axis,\n\n# Rewrite appended by tooling, only applies to main stack\n:list,(,nf.cluster,{{ selected_cluster }},:eq,:cq,),:each\n

    Since: 1.6

    Example:

    a,b,c,:freeze\n
    PosInputOutput 0 c c 1 b b 2 a a"},{"location":"asl/ref/fsub/","title":"fsub","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 - ts2): TimeSeriesExpr

    Floating point subtraction operator. Compute a new time series where each interval has the value (a - b) where a and b are the corresponding intervals in the input time series.

    :fsub 1.0 0.0 1.0 NaN NaN Input 1 2.0 0.0 1.0 1.0 NaN Input 2 1.0 0.0 0.0 NaN NaN

    Note in many cases NaN will appear in data, e.g., if a node was brought up and started reporting in the middle of the time window for the graph. This can lead to confusing behavior if added to a line that does have data as the result will be NaN. Use the sub operator to treat NaN values as zero for combining with other time series.

    Example subtracting a constant:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n30000,:fsub\n

    Example subtracting two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:fsub\n
    "},{"location":"asl/ref/ge/","title":"ge","text":"

    Greater than or equal operator. There are two variants of the :ge operator.

    "},{"location":"asl/ref/ge/#choosing","title":"Choosing","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k >= v): Query

    This first variant is used for choosing the set of time series to operate on. It selects time series that have a value for a key that is greater than or equal to a specified value. For example, consider the following query:

    name,ssCpuSystem,:ge\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/ge/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 >= ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a >= b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a >= b 00:01 0.0 0.0 0.0 00:01 0.0 1.0 0.0 00:02 1.0 0.0 1.0 00:03 1.0 1.0 1.0 00:04 0.5 1.7 0.0

    The result will be a signal time series that will be 1.0 for intervals where the condition is true and 0.0 for intervals where it is false.

    Info

    Note, the data points have floating point values. It is advisable to avoid relying on an exact equality match.

    Example:

    BeforeAfter
    minuteOfHour,:time,\nhourOfDay,:time\n
    minuteOfHour,:time,\nhourOfDay,:time,\n:ge\n
    "},{"location":"asl/ref/get/","title":"get","text":"Input Stack:k \u21e8 Output Stack:vars[k]

    Get the value of a variable and push it on the stack.

    Example:

    k,v,:set,\nk,:get\n
    PosInputOutput 0 k v"},{"location":"asl/ref/gt/","title":"gt","text":"

    Greater than operator. There are two variants of the :gt operator.

    "},{"location":"asl/ref/gt/#choosing","title":"Choosing","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k > v): Query

    This first variant is used for choosing the set of time series to operate on. It selects time series that have a value for a key that is greater than a specified value. For example, consider the following query:

    name,ssCpuSystem,:gt\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/gt/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 > ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a > b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a > b 00:01 0.0 0.0 0.0 00:01 0.0 1.0 0.0 00:02 1.0 0.0 1.0 00:03 1.0 1.0 0.0 00:04 0.5 1.7 0.0

    The result will be a signal time series that will be 1.0 for intervals where the condition is true and 0.0 for intervals where it is false.

    Example:

    BeforeAfter
    minuteOfHour,:time,\nhourOfDay,:time\n
    minuteOfHour,:time,\nhourOfDay,:time,\n:gt\n
    "},{"location":"asl/ref/has/","title":"has","text":"Input Stack:k: String \u21e8 Output Stack:Query

    Select time series that have a specified key. For example, consider the following query:

    nf.node,:has\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp ssCpuUser api i-0456"},{"location":"asl/ref/head/","title":"head","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Shorthand equivalent to writing: :limit

    Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n2,:head\n
    "},{"location":"asl/ref/heatmap/","title":"heatmap","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Since: 1.8

    Plot the time series as a heatmap.

    See heatmap for more information.

    Example:

    Default
    name,sps,:eq,\n(,nf.cluster,),:by,\n:heatmap\n
    "},{"location":"asl/ref/in/","title":"in","text":"Input Stack:vs: List[String]k: String \u21e8 Output Stack:(k in vs): Query

    Select time series where the value for a key is in the specified set. For example, consider the following query:

    name,(,ssCpuUser,ssCpuSystem,),:in\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/integral/","title":"integral","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Sum the values across the evaluation context. This is typically used to approximate the distinct number of events that occurred. If the input is non-negative, then each datapoint for the output line will represent the area under the input line from the start of the graph to the time for that datapoint. Missing values, NaN, will be treated as zeroes. For example:

    Input :integral 0 0 1 1 -1 0 NaN 0 0 0 1 1 2 3 1 4 1 5 0 5

    For a counter, each data point represents the average rate per second over the step interval. To compute the total amount incremented, the value first needs to be converted to a rate per step interval. This conversion can be performed using the :per-step operation.

    Examples:

    BeforeAfter
    1\n
    1,:integral\n
    BeforeAfter
    name,requestsPerSecond,:eq,\n:sum,\n:per-step\n
    name,requestsPerSecond,:eq,\n:sum,\n:per-step,\n:integral\n
    "},{"location":"asl/ref/le/","title":"le","text":"

    Less than or equal operator. There are two variants of the :le operator.

    "},{"location":"asl/ref/le/#choosing","title":"Choosing","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k <= v): Query

    This first variant is used for choosing the set of time series to operate on. It selects time series that have a value for a key that is less than or equal to a specified value. For example, consider the following query:

    name,ssCpuSystem,:le\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/le/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 <= ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a <= b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a <= b 00:01 0.0 0.0 1.0 00:01 0.0 1.0 1.0 00:02 1.0 0.0 0.0 00:03 1.0 1.0 1.0 00:04 0.5 1.7 1.0

    The result will be a signal time series that will be 1.0 for intervals where the condition is true and 0.0 for intervals where it is false.

    Example:

    BeforeAfter
    minuteOfHour,:time,\nhourOfDay,:time\n
    minuteOfHour,:time,\nhourOfDay,:time,\n:le\n
    "},{"location":"asl/ref/legend/","title":"legend","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Set the legend text. Legends can contain variables based on the exact keys matched in the query clause and keys used in a group by. Variables start with a $ sign and can optionally be enclosed between parentheses. The parentheses are required for cases where the characters immediately following the name could be a part of the name. If a variable is not defined, then the name of the variable will be used as the substitution value.

    The variable atlas.offset can be used to indicate the time shift used for the underlying data.

    Examples:

    BeforeAfter
    name,sps,:eq,\n(,name,),:by\n
    name,sps,:eq,\n(,name,),:by,\n$name,:legend\n
    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\ncluster+$nf.cluster,:legend\n
    "},{"location":"asl/ref/limit/","title":"limit","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Restrict the output to the first N lines from the input expression. The lines will be chosen in order based on the sort and order used.

    Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n3,:limit\n
    "},{"location":"asl/ref/line/","title":"line","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Change the line style to be line. This is the default mode and usually does not need to be set explicitly.

    See the line style examples page for more information.

    Example:

    Default
    name,sps,:eq,\n:sum,\n:line\n
    "},{"location":"asl/ref/list/","title":"list","text":"Input Stack:... \u21e8 Output Stack:List[?]

    Pop all items off the stack and push them as a list.

    Example:

    a,b,:list\n
    PosInputOutput 0 b List(b, a) 1 a
    ,:list\n
    PosInputOutput 0 List()"},{"location":"asl/ref/ls/","title":"ls","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Set the line style. The value should be one of:

    • line: this is the default, draws a normal line.
    • area: fill in the space between the line value and 0 on the Y-axis.
    • stack: stack the filled area on to the previous stacked lines on the same axis.
    • vspan: non-zero datapoints will be drawn as a vertical span.

    See the line style examples page for more information.

    Example:

    LineArea
    name,sps,:eq,\n:sum,\n(,name,),:by,\nline,:ls\n
    name,sps,:eq,\n:sum,\n(,name,),:by,\narea,:ls\n
    StackVSpan
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\nstack,:ls\n
    name,sps,:eq,\n:sum,\n(,name,),:by,\n200e3,:gt,\nvspan,:ls\n
    "},{"location":"asl/ref/lt/","title":"lt","text":"

    Less than operator. There are two variants of the :lt operator.

    "},{"location":"asl/ref/lt/#choosing","title":"Choosing","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k < v): Query

    This first variant is used for choosing the set of time series to operate on. It selects time series that have a value for a key that is less than a specified value. For example, consider the following query:

    name,ssCpuSystem,:lt\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/lt/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 < ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a < b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a < b 00:01 0.0 0.0 0.0 00:01 0.0 1.0 1.0 00:02 1.0 0.0 0.0 00:03 1.0 1.0 0.0 00:04 0.5 1.7 1.0

    The result will be a signal time series that will be 1.0 for intervals where the condition is true and 0.0 for intervals where it is false.

    Example:

    BeforeAfter
    minuteOfHour,:time,\nhourOfDay,:time\n
    minuteOfHour,:time,\nhourOfDay,:time,\n:lt\n
    "},{"location":"asl/ref/lw/","title":"lw","text":"Input Stack:IntTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    The width of the stroke used when drawing the line.

    Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n(,name,),:by\n
    name,sps,:eq,\n:sum,\n(,name,),:by,\n2,:lw\n
    "},{"location":"asl/ref/map/","title":"map","text":"Input Stack:function: Listitems: List \u21e8 Output Stack:List(function(items[0]), ..., function(items[N-1]))

    Create a new list by applying a function to all elements of a list.

    Example:

    (,a%s,b%s,),(,(,.netflix.com,),:format,\n),:map\n
    PosInputOutput 0 List((, .netflix.com, ), :format) List(a.netflix.com, b.netflix.com) 1 List(a%s, b%s)"},{"location":"asl/ref/max/","title":"max","text":"

    Max aggregation operator. There are two variants of the :max operator.

    "},{"location":"asl/ref/max/#aggregation","title":"Aggregation","text":"Input Stack:Query \u21e8 Output Stack:AggregationFunction

    Select the maximum value for corresponding times across all matching time series.

    name,ssCpuUser,:eq,\n:max\n

    When matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The values from the corresponding intervals will be aggregated. For the first interval using the sample data above the values are 1.0, 8.0, and 1.0. The maximum of these values is 8.0. This leads to a final result of:

    NameData ssCpuUser [8.0, 7.0, 6.0]

    The only tags for the aggregated result are those that are matched exactly (:eq clause) as part of the choosing criteria or are included in a group by.

    "},{"location":"asl/ref/max/#math","title":"Math","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Select the maximum value for corresponding times across the time series resulting from the input expression. This is typically used when there is a need to use some other aggregation for the grouping. Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\n:max\n
    "},{"location":"asl/ref/median/","title":"median","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    Shorthand equivalent to writing: (,50,),:percentiles

    BeforeAfter
    name,requestLatency,:eq\n
    name,requestLatency,:eq,\n:median\n
    "},{"location":"asl/ref/min/","title":"min","text":"

    Min aggregation operator. There are two variants of the :min operator.

    "},{"location":"asl/ref/min/#aggregation","title":"Aggregation","text":"Input Stack:Query \u21e8 Output Stack:AggregationFunction

    Select the minimum value for corresponding times across all matching time series.

    name,ssCpuUser,:eq,\n:min\n

    When matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The values from the corresponding intervals will be aggregated. For the first interval using the sample data above the values are 1.0, 8.0, and 1.0. The minimum of these values is 1.0. This leads to a final result of:

    NameData ssCpuUser [1.0, 2.0, 2.0]

    The only tags for the aggregated result are those that are matched exactly (:eq clause) as part of the choosing criteria or are included in a group by.

    "},{"location":"asl/ref/min/#math","title":"Math","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Select the minimum value for corresponding times across the time series resulting from the input expression. This is typically used when there is a need to use some other aggregation for the grouping. Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\n:min\n
    "},{"location":"asl/ref/mul/","title":"mul","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 * ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a * b) where a and b are the corresponding intervals in the input time series. NaNs in a series when other series are present are treated as 1.

    Example multiplying a constant:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n1024,:mul\n

    Example multiplying two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:mul\n
    "},{"location":"asl/ref/named-rewrite/","title":"named-rewrite","text":"Input Stack:name: Stringrewritten: TimeSeriesExproriginal: TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Internal operation used by some macros to provide a more user friendly display expression. The expanded version will get used for evaluation, but if a new expression is generated from the parsed expression tree it will use the original version along with the name of the macro.

    BeforeAfter
    name,ssCpuUser,:eq,\n:dup,\n:dup,\n:sum,\n:swap,\n:count,\n:div\n
    name,ssCpuUser,:eq,\n:dup,\n:dup,\n:sum,\n:swap,\n:count,\n:div,\navg,:named-rewrite\n
    "},{"location":"asl/ref/ndrop/","title":"ndrop","text":"Input Stack:Na0...aN \u21e8 Output Stack:aN

    Remove the top N items on the stack.

    Example:

    a,0,:ndrop\n
    PosInputOutput 0 0 a 1 a
    a,b,c,2,:ndrop\n
    PosInputOutput 0 2 a 1 c 2 b 3 a
    a,b,c,4,:ndrop\n
    PosInputOutput 0 4 1 c 2 b 3 a
    ,:ndrop\n

    Warning

    Throws an exception due to missing the N param.

    "},{"location":"asl/ref/neg/","title":"neg","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute a new time series where each interval has the negated value of the input time series.

    Example:

    064-64
    0,:neg\n
    64,:neg\n
    -64,:neg\n
    "},{"location":"asl/ref/nip/","title":"nip","text":"Input Stack:? \u21e8 Output Stack:?

    Shorthand equivalent to writing: :swap,:drop

    Example:

    a,b,:nip\n
    PosInputOutput 0 b b 1 a"},{"location":"asl/ref/nlist/","title":"nlist","text":"Input Stack:Na0...aN \u21e8 Output Stack:List(aN-1, ..., a0)aN

    Create a list with the top N items on the stack.

    Since: 1.5.0

    Examples:

    a,0,:nlist\n
    PosInputOutput 0 0 List() 1 a a
    a,b,c,2,:nlist\n
    PosInputOutput 0 2 List(b, c) 1 c a 2 b 3 a
    a,b,c,4,:nlist\n
    PosInputOutput 0 4 List(a, b, c) 1 c 2 b 3 a"},{"location":"asl/ref/node-avg/","title":"node-avg","text":"Input Stack:Query \u21e8 Output Stack:TimeSeriesExpr

    A helper to compute an average using the poller.asg.instance metric as the denominator. The common infrastructure tags will be used to restrict the scope for the denominator. This operator should be used instead of :avg if the goal is to compute an average per node.

    name,sps,:eq,\nnf.app,nccp,:eq,\n:and,\n:node-avg\n
    "},{"location":"asl/ref/not/","title":"not","text":"Input Stack:q: Query \u21e8 Output Stack:(!q): Query

    Select time series that do not have a specified key. For example, consider the following query:

    nf.node,:has,\n:not\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp ssCpuUser api i-0456"},{"location":"asl/ref/offset/","title":"offset","text":"Input Stack:DurationTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Warning

    Note that there is a deprecated List[Duration] variant that only modifies the presentation at the end. It cannot be used along with math operations.

    Shift the time frame to use when fetching the data. This is used to look at a previous interval as a point of reference, e.g., day-over-day or week-over-week. Offset cannot be used with streaming execution of the query, consider using the delay operator for short intervals to detect a change.

    Examples:

    BeforeAfterCombined
    name,sps,:eq,\n(,name,),:by\n
    name,sps,:eq,\n(,name,),:by,\n1w,:offset\n
    name,sps,:eq,\n(,name,),:by,\n:dup,\n1w,:offset\n
    BeforeAfterCombined
    name,sps,:eq,\n(,name,),:by\n
    name,sps,:eq,\n(,name,),:by,\nPT1H,:offset\n
    name,sps,:eq,\n(,name,),:by,\n:dup,\nPT1H,:offset\n
    "},{"location":"asl/ref/or/","title":"or","text":"

    There are two variants of the :or operator.

    "},{"location":"asl/ref/or/#choosing","title":"Choosing","text":"Input Stack:q2: Queryq1: Query \u21e8 Output Stack:(q1 OR q2): Query

    This first variant is used for choosing the set of time series to operate on. It is a binary operator that matches if either of the sub-queries match. For example, consider the following query:

    nf.app,alerttest,:eq,\nname,ssCpuUser,:eq,\n:or\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/or/#math","title":"Math","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 OR ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a OR b) where a and b are the corresponding intervals in the input time series. For example:

    Time a b a OR b 00:01 0.0 0.0 0.0 00:01 0.0 1.0 1.0 00:02 1.0 0.0 1.0 00:03 1.0 1.0 1.0 00:04 0.5 1.7 1.0

    The result will be a signal time series that will be 1.0 for all intervals where the corresponding values of a or b are non-zero. Example:

    BeforeAfter
    minuteOfDay,:time,\n:dup,\n300,:gt,\n:swap,\n290,:lt\n
    minuteOfDay,:time,\n:dup,\n300,:gt,\n:swap,\n290,:lt,\n:or\n
    "},{"location":"asl/ref/order/","title":"order","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Order to use for sorting results. Supported values are asc and desc for ascending and descending order respectively. Default is asc.

    Since: 1.5

    Examples:

    SortedDefault
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\nmax,:sort,\nasc,:order\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\ndesc,:order\n
    "},{"location":"asl/ref/over/","title":"over","text":"Input Stack:ba \u21e8 Output Stack:aba

    Copy the item in the second position on the stack to the top.

    Example:

    a,b,:over\n
    PosInputOutput 0 b a 1 a b 2 a"},{"location":"asl/ref/palette/","title":"palette","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Set the palette to use for the results of an expression. This operator allows for scoping a palette to a particular group by instead of to all lines that share the same axis. A common use-case is to have multiple stacked group by expressions using different palettes. For example, suppose I want to create a graph showing overall request per second hitting my services with successful requests shown in shades of green and errors in shades of red. This can make it easy to visually see if a change is due to an increase in errors:

    Or a spike in successful requests:

    Examples:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\nreds,:palette\n
    BeforeAfter
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\nreds,:palette\n
    "},{"location":"asl/ref/pct/","title":"pct","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Shorthand equivalent to writing: :dup,:dup,:sum,:div,100,:mul,pct,:named-rewrite The percent contribution of an individual time series to a group.

    Example:

    name,sps,:eq,\n(,nf.cluster,),:by,\n:pct\n
    BeforeAfterStack to 100%
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:pct\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:pct,\n:stack\n
    "},{"location":"asl/ref/per-step/","title":"per-step","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Converts a line from a rate per second to a rate based on the step size of the graph. This is useful for getting an estimate of the raw number of events for a given interval.

    064-64
    0,:per-step\n
    64,:per-step\n
    -64,:per-step\n
    "},{"location":"asl/ref/percentiles-heatmap/","title":"percentiles-heatmap","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Since 1.8.

    Group the metric by the percentiles tag and plot the data as a heatmap. Requires that the metric be recorded as a percentile.

    See heatmap for more information.

    Shorthand equivalent of writing (,percentile,),:by,:heatmap

    Example:

    Default
    name,requestLatency,:eq,\n:percentiles-heatmap\n
    "},{"location":"asl/ref/percentiles/","title":"percentiles","text":"Input Stack:percentiles: ListQuery \u21e8 Output Stack:TimeSeriesExpr

    Estimate percentiles for a timer or distribution summary. The data must have been published appropriately to allow the approximation. If using spectator, then see PercentileTimer and PercentileDistributionSummary helper classes.

    The percentile values can be shown in the legend using $percentile.

    Since: 1.5.0 (first in 1.5.0-rc.4)

    BeforeAfter
    name,requestLatency,:eq\n
    name,requestLatency,:eq,\n(,25,50,90,),:percentiles\n
    "},{"location":"asl/ref/pick/","title":"pick","text":"Input Stack:Na0...aN \u21e8 Output Stack:aN-1a0...aN

    Pick an item in the stack and put a copy on the top.

    Since: 1.5.0

    Example:

    a,0,:pick\n
    PosInputOutput 0 0 a 1 a a
    a,b,0,:pick\n
    PosInputOutput 0 0 b 1 b b 2 a a
    a,b,1,:pick\n
    PosInputOutput 0 1 a 1 b b 2 a a"},{"location":"asl/ref/pow/","title":"pow","text":"Input Stack:TimeSeriesExprTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute a new time series where each interval has the value (a power b) where a and b are the corresponding intervals in the input time series.

    Examples:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n42,:pow\n
    BeforeAfter
    name,sps,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,sps,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:pow\n
    "},{"location":"asl/ref/random/","title":"random","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Generate a time series that appears to be random noise for the purposes of experimentation and generating sample data. To ensure that the line is deterministic and reproducible it actually is based on a hash of the timestamp. Each datapoint is a value between 0.0 and 1.0.

    Random
    :random\n
    "},{"location":"asl/ref/re/","title":"re","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k=~/^v/): Query

    Warning

    Regular expressions can be expensive to check and should be avoided if possible. When designing data to publish ensure that common query patterns would not need the use of regular expressions.

    Select time series where the value for a key matches the specified regular expression. For example, consider the following query:

    name,ssCpu,:re\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456

    The regular expression value will be automatically anchored at the start and the matching is case sensitive. Always try to have a simple prefix on the expression to allow for more efficient matching of the expression. For more information on supported patterns, see the Java regular expressions documentation.

    "},{"location":"asl/ref/reic/","title":"reic","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:(k=~/^v/i): Query

    Warning

    Ignoring the case will always result in a full scan for the key. This should be used sparingly and only for tag queries. If a case-insensitive match is not required, use :re instead.

    Select time series where the value for a key matches the specified regular expression with case insensitive matching. For example, consider the following query:

    name,ssCPU,:reic\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456

    Notice that the casing for the query does not match the data. The regular expression value will be automatically anchored at the start. For more information on supported patterns, see the Java regular expressions documentation.

    "},{"location":"asl/ref/roll/","title":"roll","text":"Input Stack:Na0...aN \u21e8 Output Stack:aN-1a0...aN-2aN

    Rotate an item in the stack and put it on the top.

    Since: 1.5.0

    Example:

    a,0,:roll\n
    PosInputOutput 0 0 a 1 a
    a,b,0,:roll\n
    PosInputOutput 0 0 b 1 b a 2 a
    a,b,1,:roll\n
    PosInputOutput 0 1 a 1 b b 2 a"},{"location":"asl/ref/rolling-count/","title":"rolling-count","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Number of occurrences within a specified window. This operation is frequently used in alerting expressions to reduce noise. For example:

    # Check to see if average cpu usage is > 80%\nname,cpuUser,:eq,:avg,80,:gt,\n\n# Only alert if that is true for more than 3 of the last 5\n# datapoints\n5,:rolling-count,3,:gt\n

    A value is counted if it is non-zero. Missing values, NaN, will be treated as zeroes. For example:

    Input 3,:rolling-count 0 0 1 1 -1 2 NaN 2 0 1 1 1 1 2 1 3 1 3 0 2

    The window size, n, is the number of datapoints to consider including the current value. Note that it is based on datapoints not a specific amount of time. As a result the number of occurrences will be reduced when transitioning to a larger time frame that causes consolidation.

    BeforeAfter
    :random,\n0.4,:gt\n
    :random,\n0.4,:gt,\n5,:rolling-count\n
    "},{"location":"asl/ref/rolling-max/","title":"rolling-max","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Maximum value within a specified window. This operation can be used in alerting expressions to find an upper bound for noisy data based on recent samples. For example:

    name,sps,:eq,:sum,\n:dup,\n5,:rolling-max\n

    Missing values, NaN, will be ignored when computing the max. If all values within the window are NaN, then NaN will be emitted. For example:

    Input 3,:rolling-max 0 0 1 1 -1 1 NaN 1 0 0 1 1 1 1 1 1 1 1 0 1

    The window size, n, is the number of datapoints to consider including the current value. Note that it is based on datapoints not a specific amount of time. As a result the number of occurrences will be reduced when transitioning to a larger time frame that causes consolidation.

    Since: 1.6

    BeforeAfter
    :random,\n0.4,:gt\n
    :random,\n0.4,:gt,\n5,:rolling-max\n
    "},{"location":"asl/ref/rolling-mean/","title":"rolling-mean","text":"Input Stack:minNumValues: Intn: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Mean of the values within a specified window. The mean will only be emitted if there are at least a minimum number of actual values (not NaN) within the window. Otherwise NaN will be emitted for that time period.

    Input 3,2,:rolling-mean 0 NaN 1 0.5 -1 0.0 NaN 0.0 NaN NaN 0 NaN 1 0.5 1 0.667 1 1 0 0.667

    The window size, n, is the number of datapoints to consider including the current value. There must be at least minNumValues non-NaN values within that window before it will emit a mean. Note that it is based on datapoints, not a specific amount of time. As a result the number of occurrences will be reduced when transitioning to a larger time frame that causes consolidation.

    Since: 1.6

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n5,3,:rolling-mean\n
    "},{"location":"asl/ref/rolling-min/","title":"rolling-min","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Minimum value within a specified window. This operation can be used in alerting expressions to find a lower bound for noisy data based on recent samples. For example:

    name,sps,:eq,:sum,\n:dup,\n5,:rolling-min\n

    Missing values, NaN, will be ignored when computing the min. If all values within the window are NaN, then NaN will be emitted. For example:

    Input 3,:rolling-min 0 0 1 0 -1 -1 NaN -1 0 -1 1 0 1 0 1 1 1 1 0 0

    The window size, n, is the number of datapoints to consider including the current value. Note that it is based on datapoints not a specific amount of time. As a result the number of occurrences will be reduced when transitioning to a larger time frame that causes consolidation.

    Since: 1.6

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n5,:rolling-min\n
    "},{"location":"asl/ref/rolling-sum/","title":"rolling-sum","text":"Input Stack:n: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Sum of the values within a specified window.

    Input 3,:rolling-sum 0 0.0 1 1.0 -1 0.0 NaN 0.0 NaN -1.0 NaN NaN 1 1.0 1 2.0 1 3.0 0 2.0

    The window size, n, is the number of datapoints to consider including the current value. Note that it is based on datapoints, not a specific amount of time. As a result the number of occurrences will be reduced when transitioning to a larger time frame that causes consolidation.

    Since: 1.6

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n5,:rolling-sum\n
    "},{"location":"asl/ref/rot/","title":"rot","text":"Input Stack:b...a \u21e8 Output Stack:ab...

    Rotate the stack so that the item at the bottom is now at the top.

    Example:

    a,b,c,d,:rot\n
    PosInputOutput 0 d a 1 c d 2 b c 3 a b"},{"location":"asl/ref/s/","title":"s","text":"Input Stack:replacement: StringsearchPattern: StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Perform a search and replace on the legend strings. This command is similar to the global search and replace (s/regexp/replace/g) operation from tools like vim or sed.

    The replacement string can use variables to refer to the capture groups of the input expression. The syntax is the same as for legends.

    Since: 1.6

    Examples:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend,\n^nccp-(.*)$,$1,:s\n
    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend,\n^nccp-(?.*)$,$stack,:s\n\n\n\nBeforeAfter\n\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend,\nnccp-,_,:s\n
    \n\n\nBeforeAfter\n\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n$nf.cluster,:legend,\n([a-z]),_$1,:s\n
    "},{"location":"asl/ref/sdes-fast/","title":"sdes-fast","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing sliding DES using settings to quickly adjust to the input line. See recommended values for more information.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:sdes-fast\n
    "},{"location":"asl/ref/sdes-simple/","title":"sdes-simple","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Shorthand equivalent to writing: :dup,10,0.1,0.5,:sdes,sdes-simple,:named-rewrite

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:sdes-simple\n
    "},{"location":"asl/ref/sdes-slow/","title":"sdes-slow","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing sliding DES using settings to slowly adjust to the input line. See recommended values for more information.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:sdes-slow\n
    "},{"location":"asl/ref/sdes-slower/","title":"sdes-slower","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Helper for computing sliding DES using settings to slowly adjust to the input line. See recommended values for more information.

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:sdes-slower\n
    "},{"location":"asl/ref/sdes/","title":"sdes","text":"Input Stack:beta: Doublealpha: Doubletraining: IntTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Variant of :des that is deterministic as long as the step size does not change. One of the common complaints with DES is that to get the same value for a given time you must start feeding in data at exactly the same time. So for normal graphs where it is computed using the window of the chart it will have slightly different predictions for a given time. As it is often used for alerting this makes it cumbersome to try and determine:

    1. Why an alarm fired
    2. When alarms would have fired for tuning

    Sliding DES uses two DES functions and alternates between them. One will get trained while the other is getting used, and then the one that was getting used will get reset and the roles swapped.

     F1 | A |-- T1 --|-- P1 --|-- T1 --|-- P1 --|-- T1 --|\n F2 | A |        |-- T2 --|-- P2 --|-- T2 --|-- P2 --|\n\nResult:\n\n R  |-- NaN -----|-- P1 --|-- P2 --|-- P1 --|-- P2 --|\n

    Both functions will ignore any data until it reaches a boundary, even multiple, of the training window. That is shown as A in the diagram above. The first function will then start training, T1, and after the training window the first predicted values, P1, will get generated. The output line will alternate between the predictions from both DES functions.

    The alternation between functions can cause the prediction line to look choppier than DES, e.g., on a gradual drop:

    Further, since each prediction only considers data for a narrow window it will adjust to sharp changes faster. For example:

    Since: 1.5.0

    BeforeAfter
    name,requestsPerSecond,:eq,\n:sum,\n:per-step\n
    name,requestsPerSecond,:eq,\n:sum,\n5,0.1,0.5,:sdes\n
    "},{"location":"asl/ref/set/","title":"set","text":"Input Stack:vk \u21e8 Output Stack:

    Set the value of a variable.

    Example:

    k,v,:set\n
    PosInputOutput 0 v 1 k"},{"location":"asl/ref/sort/","title":"sort","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Sort the results of an expression in the legend by one of the summary statistics or by the legend text. The default behavior is to sort by the legend text. This will sort in ascending order by default, for descending order use order.

    Since: 1.5

    Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:sum,\n(,nf.cluster,),:by,\nmax,:sort\n
    "},{"location":"asl/ref/sqrt/","title":"sqrt","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute a new time series where each interval has the square root of the value from the input time series.

    064-64
    0,:sqrt\n
    64,:sqrt\n
    -64,:sqrt\n
    "},{"location":"asl/ref/srandom/","title":"srandom","text":"Input Stack:seed: Int \u21e8 Output Stack:TimeSeriesExpr

    Generate a time series that appears to be random noise for the purposes of experimentation and generating sample data. To ensure that the line is deterministic and reproducible it actually is based on a hash of the timestamp. The seed value is used to vary the values for the purposes of creating multiple different sample lines. Each datapoint is a value between 0.0 and 1.0.

    Example:

    Seeded Random: /api/v1/graph?w=200&h=125&s=e-3h&e=2012-01-01T07:00&tz=UTC&q=42,:srandom @@@"},{"location":"asl/ref/sset/","title":"sset","text":"Input Stack:kv \u21e8 Output Stack:

    Shorthand equivalent to writing: :swap,:set

    Example:

    a,b,:sset\n
    PosInputOutput 0 b 1 a"},{"location":"asl/ref/stack/","title":"stack","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Change the line style to be stack. In this mode the line will be filled to the previous stacked line on the same axis.

    See the line style examples page for more information.

    Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stack\n
    "},{"location":"asl/ref/starts/","title":"starts","text":"Input Stack:v: Stringk: String \u21e8 Output Stack:Query

    Select time series where the value for a key has the specified prefix. For example, consider the following query:

    name,ssCpu,:starts\n

    When matching against the sample data in the table below, the highlighted time series would be included in the result set:

    Namenf.appnf.node ssCpuUser alerttest i-0123 ssCpuSystem alerttest i-0123 ssCpuUser nccp i-0abc ssCpuSystem nccp i-0abc numRequests nccp i-0abc ssCpuUser api i-0456"},{"location":"asl/ref/stat-avg-mf/","title":"stat-avg-mf","text":"

    Warning

    Deprecated: use :stat instead.

    Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Equivalent to avg,:stat. Example of usage:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:stat-avg-mf\n
    "},{"location":"asl/ref/stat-avg/","title":"stat-avg","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the avg,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines that have an average value greater than 5k and less than 20k:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-avg,\n5e3,:gt,\n:stat-avg,\n20e3,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/stat-count/","title":"stat-count","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the count,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines where the count value is greater than 50:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-count,\n50,:gt,\n:filter\n
    "},{"location":"asl/ref/stat-last/","title":"stat-last","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the last,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines where the last value is greater than 5k and less than 20k:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-last,\n5e3,:gt,\n:stat-last,\n20e3,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/stat-max-mf/","title":"stat-max-mf","text":"

    Warning

    Deprecated: use :stat instead.

    Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Equivalent to max,:stat. Example of usage:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:stat-max-mf\n
    "},{"location":"asl/ref/stat-max/","title":"stat-max","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the max,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines that have a maximum value greater than 5k and less than 20k:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-max,\n5e3,:gt,\n:stat-max,\n20e3,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/stat-min-mf/","title":"stat-min-mf","text":"

    Warning

    Deprecated: use :stat instead.

    Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Equivalent to min,:stat. Example of usage:

    BeforeAfter
    name,sps,:eq,\n:sum\n
    name,sps,:eq,\n:sum,\n:stat-min-mf\n
    "},{"location":"asl/ref/stat-min/","title":"stat-min","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the min,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines that have a minimum value greater than 5k and less than 20k:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-min,\n5e3,:gt,\n:stat-min,\n20e3,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/stat-total/","title":"stat-total","text":"Input Stack: \u21e8 Output Stack:TimeSeriesExpr

    Represents the total,:stat of the input time series when used with the filter operation. The filter operator will automatically fill in the input when used so the user does not need to repeat the input expression for the filtering criteria.

    Example of restricting to lines where the sum of all data points for the line is greater than 1M and less than 4M:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stat-total,\n1e6,:gt,\n:stat-total,\n4e6,:lt,\n:and,\n:filter\n
    "},{"location":"asl/ref/stat/","title":"stat","text":"Input Stack:StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Create a summary time series showing the value of the specified summary statistic for the data points of the input time series. Valid statistic values are avg, count, max, min, last, and total. The graph below shows avg, max, min, and last for a simple input time series:

    The count is the number of data points for the time series. In the example above, that is five since the last value is NaN. The total is the sum of the data points for the time series.

    The most common usage of stats is in conjunction with :filter to restrict the set of results for grouped expression. When filtering, helper macros, :stat-$(name), can be used to represent applying the statistic to the input time series being filtered without explicitly repeating the input expression.

    Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\navg,:stat\n
    "},{"location":"asl/ref/stddev/","title":"stddev","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute the standard deviation for the results of a group by. If the underlying data is for a timer or distribution summary, then dist-stddev is likely a better choice.

    Since: 1.6

    Example:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\n:stddev\n
    "},{"location":"asl/ref/sub/","title":"sub","text":"Input Stack:ts2: TimeSeriesExprts1: TimeSeriesExpr \u21e8 Output Stack:(ts1 - ts2): TimeSeriesExpr

    Compute a new time series where each interval has the value (a subtractNaN b) where a and b are the corresponding intervals in the input time series.

    :sub 1.0 0.0 1.0 1.0 NaN Input 1 2.0 0.0 1.0 1.0 NaN Input 2 1.0 0.0 0.0 NaN NaN

    Use the fsub operator to get strict floating point behavior.

    Example subtracting a constant:

    BeforeAfter
    name,sps,:eq\n
    name,sps,:eq,\n30e3,:sub\n

    Example subtracting two series:

    BeforeAfter
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by\n
    name,requestLatency,:eq,\n:sum,\nname,requestsPerSecond,:eq,\n:max,\n(,name,),:by,\n:sub\n
    "},{"location":"asl/ref/sum/","title":"sum","text":"

    Sum aggregation operator. There are two variants of the :sum operator.

    "},{"location":"asl/ref/sum/#aggregation","title":"Aggregation","text":"Input Stack:Query \u21e8 Output Stack:AggregationFunction

    Compute the sum of all the time series that match the query. Sum is the default aggregate used if a query is specified with no explicit aggregate function. Example with implicit sum:

    name,ssCpuUser,:eq\n

    Equivalent example with explicit sum:

    name,ssCpuUser,:eq,\n:sum\n

    When matching against the sample data in the table below, the highlighted time series would be included in the aggregate result:

    Namenf.appnf.nodeData ssCpuUser alerttest i-0123 [1.0, 2.0, NaN] ssCpuSystem alerttest i-0123 [3.0, 4.0, 5.0] ssCpuUser nccp i-0abc [8.0, 7.0, 6.0] ssCpuSystem nccp i-0abc [6.0, 7.0, 8.0] numRequests nccp i-0abc [1.0, 2.0, 4.0] ssCpuUser api i-0456 [1.0, 2.0, 2.0]

    The values from the corresponding intervals will be aggregated. For the first interval using the sample data above the values are 1.0, 8.0, and 1.0. Each value other than NaN contributes one to the sum. This leads to a final result of:

    NameData ssCpuUser [10.0, 11.0, 8.0]

    The only tags for the aggregated result are those that are matched exactly (:eq clause) as part of the choosing criteria or are included in a group by.

    "},{"location":"asl/ref/sum/#math","title":"Math","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Compute the sum of all the time series from the input expression. This is typically used when there is a need to use some other aggregation for the grouping. Example:

    BeforeAfter
    name,sps,:eq,\n:max,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n:max,\n(,nf.cluster,),:by,\n:sum\n
    "},{"location":"asl/ref/swap/","title":"swap","text":"Input Stack:ba \u21e8 Output Stack:ab

    Swap the top two items on the stack.

    Example:

    a,b,:swap\n
    PosInputOutput 0 b a 1 a b"},{"location":"asl/ref/time-span/","title":"time-span","text":"Input Stack:e: Strings: String \u21e8 Output Stack:TimeSeriesExpr

    Generates a signal line based on the specified time range. The line will be 1 within the range and 0 for all other times. The format of the start and end times is the same as the start and end time parameters on the Graph API. If the time zone is not explicitly specified, then the value from the tz variable will get used. The default value for the tz variable is the primary time zone used for the graph.

    The following named times are supported for time spans:

    Name Description gs Graph start time. ge Graph end time. s Start time for the span, can only be used for the end time. e End time for the span, can only be used for the start time. now Current time. epoch January 1, 1970 UTC.

    Since: 1.6

    Example:

    RelativeAbsolute
    e-30m,ge,:time-span\n
    2014-02-20T13:00,s%2B30m,:time-span\n
    "},{"location":"asl/ref/time/","title":"time","text":"Input Stack:String \u21e8 Output Stack:TimeSeriesExpr

    Generates a line based on the current time. Supported modes are:

    • secondOfMinute
    • secondOfDay
    • minuteOfHour
    • minuteOfDay
    • hourOfDay
    • dayOfWeek
    • dayOfMonth
    • dayOfYear
    • monthOfYear
    • yearOfCentury
    • yearOfEra
    • seconds (since epoch)
    • days (since epoch)

    The mode can also be a value of the enum ChronoField.

    Examples:

    Hour of DayEnum
    hourOfDay,:time\n
    HOUR_OF_DAY,:time\n
    "},{"location":"asl/ref/topk-others-avg/","title":"topk-others-avg","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the largest value for the specified summary statistic and computes an average aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:topk-others-avg\n
    "},{"location":"asl/ref/topk-others-max/","title":"topk-others-max","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the largest value for the specified summary statistic and computes a max aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:topk-others-max\n
    "},{"location":"asl/ref/topk-others-min/","title":"topk-others-min","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the largest value for the specified summary statistic and computes a min aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:topk-others-min\n
    "},{"location":"asl/ref/topk-others-sum/","title":"topk-others-sum","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the largest value for the specified summary statistic and computes a sum aggregate for the other time series. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:topk-others-sum\n
    "},{"location":"asl/ref/topk/","title":"topk","text":"Input Stack:k: Intstat: StringTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Since: 1.7

    Restrict the output for a grouped expression to the k time series with the largest value for the specified summary statistic. Example of usage:

    BeforeAfter
    name,sps,:eq,\n(,nf.cluster,),:by\n
    name,sps,:eq,\n(,nf.cluster,),:by,\nmax,2,:topk\n

    In some cases it can be useful to see an aggregate summary of the other time series that were not part of the top set. This can be accomplished using the :topk-others-$(aggr) operators. For more details see:

    • :topk-others-avg
    • :topk-others-max
    • :topk-others-min
    • :topk-others-sum
    "},{"location":"asl/ref/trend/","title":"trend","text":"Input Stack:window: DurationTimeSeriesExpr \u21e8 Output Stack:TimeSeriesExpr

    Warning

    Deprecated: Use :rolling-mean instead.

    Computes a moving average over the input window. Until there is at least one sample for the whole window it will emit NaN. If the input line has NaN values, then they will be treated as zeros. Example:

    Input 2m,:trend 5m,:trend 0 NaN NaN 1 0.5 NaN -1 0.0 NaN NaN -0.5 NaN 0 0.0 0.0 1 0.5 0.2 2 1.5 0.4 1 1.5 0.8 1 1.0 1.0 0 0.5 1.0

    The window size is specified as a range of time. If the window size is not evenly divisible by the step size, then the window size will be rounded down. So a 5m window with a 2m step would result in a 4m window with two datapoints per average. A step size larger than the window will result in the trend being a no-op.

    Examples:

    5 Minutes20 Minutes
    :random,\nPT5M,:trend\n
    :random,\n20m,:trend\n
    "},{"location":"asl/ref/true/","title":"true","text":"Input Stack: \u21e8 Output Stack:Query

    Query expression that will match any input time series. See also :false.

    "},{"location":"asl/ref/tuck/","title":"tuck","text":"Input Stack:ba \u21e8 Output Stack:bab

    Shorthand equivalent to writing: :swap,:over

    Example:

    a,b,:tuck\n
    PosInputOutput 0 b b 1 a a 2 b"},{"location":"asl/ref/vspan/","title":"vspan","text":"Input Stack:TimeSeriesExpr \u21e8 Output Stack:StyleExpr

    Change the line style to be a vertical span. In this mode any non-zero datapoints on the line will be shown as a span. This is frequently used to visualize when an alert would have fired.

    See the line style examples page for more information.

    Example:

    BeforeAfter
    name,sps,:eq,\n:sum,\n:dup,\n20e3,:gt\n
    name,sps,:eq,\n:sum,\n:dup,\n20e3,:gt,\n:vspan\n
    "},{"location":"concepts/consolidation/","title":"Consolidation","text":"

    TODO

    "},{"location":"concepts/naming/","title":"Naming","text":""},{"location":"concepts/naming/#summary","title":"Summary","text":"
    1. Names
      • Describe the measurement being collected
      • Use camelCase
      • Static
      • Succinct
    2. Tags
      • Should be used for dimensional filtering
      • Be careful about combinatorial explosion
      • Tag keys should be static
      • Use id to distinguish between instances
    3. Use Base Units
    "},{"location":"concepts/naming/#names","title":"Names","text":""},{"location":"concepts/naming/#describe-the-measurement","title":"Describe the Measurement","text":""},{"location":"concepts/naming/#use-camelcase","title":"Use camelCase","text":"

    The main goal here is to promote consistency, which makes it easier for users. The choice of style is somewhat arbitrary, but camelCase was chosen because:

    • Used by SNMP
    • Used by Java
    • It was commonly used at Netflix when the guideline was written

    The exception to this rule is where there is an established common case. For example, with Amazon regions, it is preferred to use us-east-1 rather than usEast1 as it is the more common form.

    "},{"location":"concepts/naming/#static","title":"Static","text":"

    There should not be any dynamic content in a metric name, such as requests.$APP_NAME. Metric names and tag keys are how users interact with the data, and dynamic values make them difficult to use. Dynamic information is better suited for tag values, such as nf.app or status.

    "},{"location":"concepts/naming/#succinct","title":"Succinct","text":"

    Long names should be avoided. In many cases, long names are the result of combining many pieces of information together into a single string. In this case, consider either discarding information that is not useful or encoding the information in tag values.

    "},{"location":"concepts/naming/#tags","title":"Tags","text":"

    Historically, tags have been used to play one of two roles:

    • Dimensions. This is the primary use of tags and this feature allows the data to be filtered into subsets by values of interest.
    • Namespace. Similar to packages in Java, this allows grouping related data. This type of usage is discouraged.

    As a general rule, it should be possible to use the name as a pivot. If only the name is selected, then the user should be able to use other dimensions to filter the data and successfully reason about the value being shown.

    As a concrete example, suppose we have two metrics:

    1. The number of threads currently in a thread pool.
    2. The number of rows in a database table.
    "},{"location":"concepts/naming/#discouraged-approach","title":"Discouraged Approach","text":"
    Id poolSize = registry.createId(\"size\")\n  .withTag(\"class\", \"ThreadPool\")\n  .withTag(\"id\", \"server-requests\");\n\nId poolSize = registry.createId(\"size\")\n  .withTag(\"class\", \"Database\")\n  .withTag(\"table\", \"users\");  \n

    In this approach, if you select the name size, then it will match both the ThreadPool and Database classes. This results in a value that is an aggregate of the number of threads and the number of items in a database, which has no meaning.

    "},{"location":"concepts/naming/#recommended-approach","title":"Recommended Approach","text":"
    Id poolSize = registry.createId(\"threadpool.size\")\n  .withTag(\"id\", \"server-requests\");\n\nId poolSize = registry.createId(\"db.size\")\n  .withTag(\"table\", \"users\");  \n

    This variation provides enough context, so that if just the name is selected, the value can be reasoned about and is at least potentially meaningful.

    This variation provides enough context in the name so that the meaning is more apparent and you can successfully reason about the values. For example, if you select threadpool.size, then you can see the total number of threads in all pools. You can then group by or select an id to further filter the data to a subset in which you have an interest.

    "},{"location":"concepts/naming/#use-base-units","title":"Use Base Units","text":"

    Keep measurements in base units where possible. It is better to have all timers in seconds, disk sizes in bytes, and network rates in bytes/second. This allows any SI unit prefixes applied to tick labels on a graph to have an obvious meaning, such as 1k meaning 1 kilobyte, as opposed to 1 kilo-megabyte.

    "},{"location":"concepts/normalization/","title":"Normalization","text":"

    In Atlas, this usually refers to normalizing data points to step boundaries. Suppose that values are actually getting reported at 30 seconds after the minute, instead of exactly on the minute. The values will get normalized to the minute boundary, so that all time series in the system are consistent.

    How a normalized value is computed depends on the data source type. Atlas supports three types, indicated by the value of the atlas.dstype tag. In general, you should not need to worry about that; client libraries like Spectator will automatically handle tagging based on the data source type.

    It is recommended to at least skim through the normalization for gauges and rates to better understand how the values you see actually relate to measured data.

    "},{"location":"concepts/normalization/#gauge","title":"Gauge","text":"

    A value that is sampled from some source and the value is used as is. The last value received will be the value used for the interval. For example:

                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510                                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502    8    \u2502                                    \u2502    8    \u2502\n                \u2502         \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500                             \u2502         \u2502\n                \u2502         \u2502    6                               \u2502         \u2502\n\u2500\u2500\u2500\u2500\u2500\u2500\u2510         \u2502         \u2502                \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510         \u2502         \u2502\n 4    \u2502         \u2502         \u2502                \u2502    4    \u2502         \u2502         \u2502\n      \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502           to   \u2502         \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502\n      \u2502    2    \u2502         \u2502                \u2502         \u2502    2    \u2502         \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524           \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n1:00      1:01      1:02      1:03        1:00      1:01      1:02      1:03\n
    "},{"location":"concepts/normalization/#rate","title":"Rate","text":"

    A rate is a value representing the rate per second since the last reported value. Rate values are normalized using a weighted average. For example:

                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502    8    \u2502                                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502         \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500                             \u2502    7    \u2502\n                \u2502         \u2502    6                     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502\n\u2500\u2500\u2500\u2500\u2500\u2500\u2510         \u2502         \u2502                          \u2502    5    \u2502         \u2502\n 4    \u2502         \u2502         \u2502                \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502         \u2502\n      \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502           to   \u2502    3    \u2502         \u2502         \u2502\n      \u2502    2    \u2502         \u2502                \u2502         \u2502         \u2502         \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524           \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n1:00      1:01      1:02      1:03        1:00      1:01      1:02      1:03\n

    Here, the data is reported at exactly 30s after the minute boundary. So each value represents the average rate per second for 50% of the minute.

    Time Value 1:01 4 * 0.5 + 2 * 0.5 = 2 + 1 = 3 1:02 2 * 0.5 + 8 * 0.5 = 1 + 4 = 5 1:03 8 * 0.5 + 6 * 0.5 = 4 + 3 = 7

    If many samples are received for a given interval, then they will each be weighted based on the fraction of the interval they represent. When no previous sample exists, the value will be treated as the average rate per second over the previous step. This behavior is important to avoid under-counting the contribution from a previous interval. The example below shows what happens if there is no previous or next sample:

                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502    8    \u2502\n                \u2502         \u2502\n                \u2502         \u2502                          \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502         \u2502                          \u2502    5    \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                \u2502         \u2502                          \u2502         \u2502    4    \u2502\n      \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502           to        1    \u2502         \u2502         \u2502\n      \u2502    2    \u2502         \u2502                \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524         \u2502         \u2502\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524           \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n1:00      1:01      1:02      1:03        1:00      1:01      1:02      1:03\n

    Why perform weighted averaging for rates instead of the simpler last value approach used with gauges? Because it gives us a better summary of what we actually know from the measurements received. In practical terms:

    • Avoids dropping information if samples are more frequent than the step. Suppose we have a 1 minute step, but data is actually getting reported every 10s. For this example, assume we get 1, 5, 90, 5, 4, and 2. The last value normalization used with Gauges would end up with a value of 2. The rate normalization will give 17.833. Each value is a rate per second, so there were (1 + 5 + 90 + 5 + 4 + 2) * 10 = 1070 actual events measured during the interval. That is equivalent to 17.833 * 60, indicating we have an accurate average rate for the step size.
    • Avoids skewing the data causing misleading spikes or drops in the aggregates. Using Atlas you will typically be looking at an aggregate of time series rather than an individual time series that was reported. With last value it can have the effect of skewing samples to a later interval. Suppose the client is reporting once a minute at 5s after the minute. That value indicates more about the previous interval than it does the current one. During traffic transitions, such as moving traffic over to a new cluster or even some auto-scaling events, differences in this skew can result in the appearance of a drop because there will be many new time series getting reported with a delayed start. For existing time series it is still skewed, but tends to be less noticeable. The weighted averaging avoids these problems for the most part.
    "},{"location":"concepts/normalization/#counter","title":"Counter","text":"

    Counter is similar to rate, except that the value reported is monotonically increasing and will be converted to a rate by the backend. The conversion is done by computing the delta between the current sample and the previous sample and dividing by the time between the samples. After that it is the same as a rate.

    Note that unless the input is a monotonically increasing counter, it is generally better to have the client perform rate conversion. Since the starting value is unknown, at least two samples must be received before the first delta can be computed. This means that new time series relying on the counter type will be delayed by one interval.

    "},{"location":"concepts/time-series/","title":"Time Series","text":"

    A time series is a sequence of data points reported at a consistent interval over time. The time interval between successive data points is called the step size. In Atlas, each time series is paired with metadata called tags that allow us to query and group the data.

    "},{"location":"concepts/time-series/#tags","title":"Tags","text":"

    A set of key value pairs associated with a time series. Each time series must have at least one tag with a key of name. To make it more concrete, here is an example of a tag set represented as a JSON object:

    {\n  \"name\":       \"server.requestCount\",\n  \"status\":     \"200\",\n  \"endpoint\":   \"api\",\n  \"nf.app\":     \"fooserver\",\n  \"nf.cluster\": \"fooserver-main\",\n  \"nf.stack\":   \"main\",\n  \"nf.region\":  \"us-east-1\",\n  \"nf.zone\":    \"us-east-1c\",\n  \"nf.node\":    \"i-12345678\"\n}\n

    Usage of tags typically falls into two categories:

    1. Namespace. These are tags necessary to qualify a name, so that it can be meaningfully aggregated. Using the sample above, consider computing the sum of all metrics for application fooserver. That number would be meaningless. Properly modelled data should try to make the aggregates meaningful by selecting the name. The sum of all metrics with name = server.requestCount is the overall request count for the service.
    2. Dimensions. These are tags used to filter the data to a meaningful subset. They can be used to see the number of successful requests across the cluster by querying for status = 200 or the number of requests for a single node by querying for nf.node = i-12345678. Most tags should fall into this category.

    When creating metrics, it is important to carefully think about how the data should be tagged. See the naming docs for more information.

    "},{"location":"concepts/time-series/#metric","title":"Metric","text":"

    A metric is a specific quantity being measured, e.g., the number of requests received by a server. In casual language about Atlas, metric is often used interchangeably with time series. A time series is one way to track a metric and is the method supported by Atlas. In most cases there will be many time series for a given metric. Going back to the example, request count would usually be tagged with additional dimensions such as status and node. There is one time series for each distinct combination of tags, but conceptually it is the same metric.

    "},{"location":"concepts/time-series/#data-point","title":"Data Point","text":"

    A data point is a triple consisting of tags, timestamp, and a value. It is important to understand at a high level how data points correlate with the measurement. Consider requests hitting a server; this would typically be measured using a counter. Each time a request is received, the counter is incremented. There is not one data point per increment; a data point represents the behavior over a span of time called the step size. The client library will sample the counter once for each interval and report a single value.

    Suppose that each circle in the diagram below represents a request:

    1:00       1:01       1:02       1:03\n \u251c\u2500\u25cf\u2500\u2500\u2500\u2500\u25cf\u25cf\u25cf\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n

    There are 5 requests shown, 4 from 1:00 to 1:01, and 1 from 1:02 to 1:03. Assuming all requests incremented the same time series, i.e. all other dimensions such as status code are the same, then this would result in three data points. For counters values are always a rate per second, so for a one minute step size the total number of requests would be divided by 60 seconds. So the values stored would be:

    Time Value 1:01 4 / 60 = 0.0667 1:02 0 / 60 = 0.0000 1:03 1 / 60 = 0.0167"},{"location":"concepts/time-series/#step-size","title":"Step Size","text":"

    The amount of time between two successive data points in a time series. For Atlas the datapoints will always be on even boundaries of the step size. If data is not reported on step boundaries, it will get normalized to the boundary.

    "},{"location":"spectator/","title":"Overview","text":"

    Simple library for instrumenting code to record dimensional time series data.

    At a minimum, you need to:

    1. Understand core concepts.

      • Time Series
      • Normalization
      • Naming
      • Clock
    2. Install the metrics agent.

      • SpectatorD
    3. Install the language-specific library and configuration bindings, where available.

      • Support Class Descriptions
        • Language Overview
      • First-Class Support
        • C++
        • Go
        • Java
        • Node.js
        • Python
      • Best-Effort Support
        • Rust (internal library)
    4. Instrument some code, referring to the core usage guides on the following meter types:

      • Counters
      • Distribution Summaries
      • Gauges
      • Percentile Timers
      • Timers

    After you are more familiar with the library and need assistance with more advanced topics, see the Patterns section on the left.

    "},{"location":"spectator/agent/metrics/","title":"SpectatorD Metrics","text":""},{"location":"spectator/agent/metrics/#spectatormeasurements","title":"spectator.measurements","text":"

    The number of measurements that have either been sent to an Atlas backend or dropped.

    Unit: measurements/second

    Dimensions:

    • id: One of sent or dropped.
    • error: The type of error that occurred, one of http-error, validation, or other.
    • owner: spectatord
    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatorregistrysize","title":"spectator.registrySize","text":"

    The number of measurements stored in the registry.

    Unit: measurements

    Dimensions:

    • owner: spectatord
    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordparsedcount","title":"spectatord.parsedCount","text":"

    The number of input lines parsed.

    Unit: lines/second

    Dimensions:

    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordparseerrors","title":"spectatord.parseErrors","text":"

    The number of errors that have occurred while parsing input lines.

    Unit: lines/second

    Dimensions:

    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordpercentilecachesize","title":"spectatord.percentileCacheSize","text":"

    The number of Distribution Summaries and/or Percentile Timers that have been updated recently in the dedicated cache.

    Unit: meters

    Dimensions:

    • id: One of dist-summary or timer.
    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordpercentileexpired","title":"spectatord.percentileExpired","text":"

    The number of Distribution Summaries and/or Percentile Timers that have been expired from the dedicated cache.

    Unit: meters/second

    Dimensions:

    • id: One of dist-summary or timer.
    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordpoolallocsize","title":"spectatord.poolAllocSize","text":"

    The size of the internal string pool.

    Unit: bytes

    Dimensions:

    • Common Infrastructure
    "},{"location":"spectator/agent/metrics/#spectatordpoolentries","title":"spectatord.poolEntries","text":"

    The number of entries in the internal string pool.

    Unit: entries

    "},{"location":"spectator/agent/usage/","title":"Usage","text":""},{"location":"spectator/agent/usage/#spectatord-introduction","title":"SpectatorD Introduction","text":"

    SpectatorD is a high-performance telemetry agent that listens for metrics specified by a text-based protocol and publishes updates periodically to an Atlas aggregator service. It consolidates the logic required to apply common tagging to all metrics received, maintain metric lifetimes, and route metrics to the correct backend.

    The preferred method of using spectatord is to use one of the thin-client implementations, however, the text-based protocol was designed to make it easy for community-supported clients to be developed. It is also easy to use in shell scripts with common command line tools.

    "},{"location":"spectator/agent/usage/#command-line-configuration-flags","title":"Command Line Configuration Flags","text":"
    spectatord --help\nspectatord: A daemon that listens for metrics and reports them to Atlas.\n\n    --admin_port (Port number for the admin server.); default: 1234;\n    --age_gauge_limit (The maximum number of age gauges that may be reported by\n      this process.); default: 1000;\n    --common_tags (Common tags: nf.app=app,nf.cluster=cluster. Override the\n      default common tags. If empty, then spectatord will use the default set.\n      This flag should only be used by experts who understand the risks.);\n      default: \"\";\n    --debug (Debug spectatord. All values will be sent to a dev aggregator and\n      dropped.); default: false;\n    --enable_external (Enable external publishing.); default: false;\n    --enable_socket (Enable UNIX domain socket support. Default is true on Linux\n      and false on MacOS.); default: true;\n    --enable_statsd (Enable statsd support.); default: false;\n    --metatron_dir (Path to the Metatron certificates, which are used for\n      external publishing. A number of well-known directories are searched by\n      default. This option is only necessary if your certificates are in an\n      unusual location.); default: \"\";\n    --meter_ttl (Meter TTL: expire meters after this period of inactivity.);\n      default: 15m;\n    --no_common_tags (No common tags will be provided for metrics. 
Since no\n      common tags are available, no internal status metrics will be recorded.\n      Only use this feature for special cases where it is absolutely necessary\n      to override common tags such as nf.app, and only use it with a secondary\n      spectatord process.); default: false;\n    --port (Port number for the UDP socket.); default: 1234;\n    --socket_path (Path to the UNIX domain socket.);\n      default: \"/run/spectatord/spectatord.unix\";\n    --statsd_port (Port number for the statsd socket.); default: 8125;\n    --uri (Optional override URI for the aggregator.); default: \"\";\n    --verbose (Use verbose logging.); default: false;\n    --verbose_http (Output debug info for HTTP requests.); default: false;\n\nTry --helpfull to get a list of all flags or --help=substring shows help for\nflags which include specified substring in either in the name, or description or\npath.\n
    "},{"location":"spectator/agent/usage/#endpoints","title":"Endpoints","text":"

    By default, the daemon will listen on the following endpoints:

    • Metrics Message Protocol
    • 1234/udp (~430K reqs/sec with 16MB buffers)
    • /run/spectatord/spectatord.unix Domain Socket (~1M reqs/sec with batching)
    • Admin Server: 1234/tcp

    The choice of which endpoint to use is determined by your performance and access requirements; the Unix domain socket offers higher performance, but requires filesystem access, which may not be tenable under some container configurations. See Performance Numbers for more details.

    "},{"location":"spectator/agent/usage/#usage-examples","title":"Usage Examples","text":"

    :warning: In container environments, the -w0 option may not work and -w1 should be used instead.

    echo \"c:server.numRequests,id=failed:1\" | nc -u -w0 localhost 1234\necho \"t:server.requestLatency:0.042\" | nc -u -w0 localhost 1234\necho \"d:server.responseSizes:1024\" | nc -w0 -uU /run/spectatord/spectatord.unix\necho \"g:someGauge:60\" | nc -w0 -uU /run/spectatord/spectatord.unix\necho \"g,300:anotherGauge:60\" | nc -w0 -uU /run/spectatord/spectatord.unix\necho \"X,1543160297100:monotonic.Source:42\" | nc -w0 -uU /run/spectatord/spectatord.unix\necho \"X,1543160298100:monotonic.Source:43\" | nc -w0 -uU /run/spectatord/spectatord.unix\necho \"A:age.gauge:0\" | nc -u -w0 localhost 1234\n
    "},{"location":"spectator/agent/usage/#message-format","title":"Message Format","text":"

    The message sent to the server has the following format, where the ,options and ,tags portions are optional:

    metric-type,options:name,tags:value\n

    Multiple lines may be sent in the same packet, separated by newlines (\\n):

    echo -e \"t:server.requestLatency:0.042\\nd:server.responseSizes:1024\" | nc -u -w0 localhost 1234\n
    "},{"location":"spectator/agent/usage/#metric-types","title":"Metric Types","text":"Metric Type Symbol Description Age Gauge A The value is the time in seconds since the epoch at which an event has successfully occurred, or 0 to use the current time in epoch seconds. After an Age Gauge has been set, it will continue reporting the number of seconds since the last time recorded, for as long as the spectatord process runs. The purpose of this metric type is to enable users to more easily implement the Time Since Last Success alerting pattern. To set a specific time as the last success: A:time.sinceLastSuccess:1611081000. To set now() as the last success: A:time.sinceLastSuccess:0. By default, a maximum of 1000 Age Gauges are allowed per spectatord process, because there is no mechanism for cleaning them up. This value may be tuned with the --age_gauge_limit flag on the spectatord binary. Counter c The value is the number of increments that have occurred since the last time it was recorded. The value will be reported to the backend as a rate-per-second. Distribution Summary d The value tracks the distribution of events. It is similar to a Timer, but more general, because the size does not have to be a period of time. For example, it can be used to measure the payload sizes of requests hitting a server or the number of records returned from a query. Gauge g The value is a number that was sampled at a point in time. The default time-to-live (TTL) for gauges is 900 seconds (15 minutes) - they will continue reporting the last value set for this duration of time. Optionally, the TTL may be specified in seconds, with a minimum TTL of 5 seconds. For example, g,120:gauge:42.0 specifies a gauge with a 120 second (2 minute) TTL. Max Gauge m The value is a number that was sampled at a point in time, but it is reported as a maximum gauge value to the backend. Monotonic Counter (double) C The value is a monotonically increasing number. 
A minimum of two samples must be received in order for spectatord to calculate a delta value and report it to the backend as a rate-per-second. The value is a double data type, and negative deltas are ignored. This data type provides flexibility for transforming values into base units with division. Commonly used with networking metrics. Monotonic Counter (uint64) U The value is a monotonically increasing number. A minimum of two samples must be received in order for spectatord to calculate a delta value and report it to the backend as a rate-per-second. The value is a uint64 data type, and it will handle rollovers. Commonly used with networking metrics. Monotonic Counter (double) with Millisecond Timestamps X The value is a monotonically increasing number, sampled at a specified number of milliseconds since the epoch. A minimum of two samples must be received in order for spectatord to calculate a delta value and report it to the backend. The value should be a uint64 data type, and it will handle rollovers. This is an experimental metric type that can be used to track monotonic sources that were sampled in the recent past, with the value normalized over the reported time period. The timestamp in milliseconds since the epoch when the value was sampled must be included as a metric option: X,1543160297100:monotonic.Source:42 Percentile Distribution Summary D The value tracks the distribution of events, with percentile estimates. It is similar to a Percentile Timer, but more general, because the size does not have to be a period of time. For example, it can be used to measure the payload sizes of requests hitting a server or the number of records returned from a query. In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Distribution Summary. Be diligent about any additional dimensions added to Percentile Distribution Summaries and ensure that they have a small bounded cardinality. 
Percentile Timer T The value is the number of seconds that have elapsed for an event, with percentile estimates. This metric type will track the data distribution by maintaining a set of Counters. The distribution can then be used on the server side to estimate percentiles, while still allowing for arbitrary slicing and dicing based on dimensions. In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Timer. Be diligent about any additional dimensions added to Percentile Timers and ensure that they have a small bounded cardinality. Timer t The value is the number of seconds that have elapsed for an event.

    The data type for all numbers except U is double. The U values are recorded as uint64_t, and the calculated deltas are passed to the backend as double. Passing negative values for uint64_t data types will cause the parsed string value to roll over.

    "},{"location":"spectator/agent/usage/#metric-name-and-tags","title":"Metric Name and Tags","text":"

    The metric name and tags must follow Atlas restrictions, which are described in the sections below.

    Tags are optional. They may be specified as comma-separated key=value pairs after the metric name. For example:

    fooIsTheName,some.tag=val1,some.otherTag=val2\n

    See Atlas Naming Conventions for recommendations on naming metrics.

    "},{"location":"spectator/agent/usage/#length-restrictions","title":"Length Restrictions","text":"Limit Min Max Length of name 1 255 Tag key length 2 60 Tag value length 1 120"},{"location":"spectator/agent/usage/#allowed-characters","title":"Allowed Characters","text":"

    The metric name, tag keys and values may only use characters in the following set: -._A-Za-z0-9.

    All other characters will be converted to an underscore (_) by the client.

    To avoid issues with parsing metrics, avoid using the SpectatorD protocol delimiter characters (,=:) rather than relying on the client to rewrite them to _.

    "},{"location":"spectator/agent/usage/#metric-value","title":"Metric Value","text":"

    A double value, or a uint64 value for one kind of Monotonic Counter. The meaning of the value depends on the metric type.

    "},{"location":"spectator/agent/usage/#metrics","title":"Metrics","text":"

    See Metrics for a list of metrics published by this service.

    "},{"location":"spectator/agent/usage/#admin-server","title":"Admin Server","text":"

    An administrative server is provided with SpectatorD, so that debugging information and a few data management tasks may be completed. By default, this server listens on port 1234/TCP, but this can be modified with the --admin_port flag. The endpoints which change data may only be accessed from localhost.

    • GET /
      • Returns a service description and list of available endpoints.
    • GET /config
      • Returns the current SpectatorD configuration, including the current set of common tags.
    • GET /config/common_tags
      • Returns a description of how to use this endpoint to modify common tags.
    • POST /config/common_tags
      • Create, modify or delete common tags from the allowed set of Mantis common tags. No other common tags may be modified. Create or update a tag by setting it to a string. Delete a tag by setting the value to an empty string.
      • Allowed tags:
        • mantisJobId
        • mantisJobName
        • mantisUser
        • mantisWorkerIndex
        • mantisWorkerNumber
        • mantisWorkerStageNumber
      • Example:
        curl -X POST \\\n-d '{\"mantisJobId\": \"foo\", \"mantisJobName\": \"bar\", \"mantisUser\": \"\"}' \\\n-w \" %{http_code}\\n\" \\\nhttp://localhost:1234/config/common_tags\n
    • GET /metrics
      • Return an object containing lists of all metrics currently known to the Registry, grouped by type.
    • DELETE /metrics/A
      • Delete all AgeGauge metrics from the Registry.
    • DELETE /metrics/A/{id}
      • Delete one AgeGauge metric from the Registry, identified by the id.
      • Example:
        curl -X DELETE \\\n-w \" %{http_code}\\n\" \\\nhttp://localhost:1234/metrics/A/fooIsTheName,some.tag=val1,some.otherTag=val2\n
    • DELETE /metrics/g
      • Delete all Gauge metrics from the Registry.
    • DELETE /metrics/g/{id}
      • Delete one Gauge metric from the Registry, identified by the id.
      • Example:
        curl -X DELETE \\\n-w \" %{http_code}\\n\" \\\nhttp://localhost:1234/metrics/g/fooIsTheName,some.tag=val1,some.otherTag=val2\n
    "},{"location":"spectator/agent/usage/#performance-numbers","title":"Performance Numbers","text":"

    A key goal of this project is to deliver high performance. This means that we need to use few resources for the common use case, where the number of metric updates is relatively small (< 10k reqs/sec), and it also needs to be able to handle hundreds of thousands of updates per second when required.

    Using Unix domain sockets, we can handle close to 1M metric updates per second, assuming the client batches the updates and sends a few at a time. Sending every single metric update requires a lot of context switching, but is something that works well for the majority of our use cases. This simplicity means the user does not have to maintain any local state.

    Transport          Batch Size    First 10M          Second 10M\nUnix Dgram         1             22.98s (435k rps)  20.58s (486k rps)\nUnix Dgram         8             11.46s (873k rps)   9.89s (1011k rps)\nUnix Dgram         32            10.38s (963k rps)   8.49s (1178k rps)\n

    The UDP transport is particularly sensitive to the max receive buffer size (16MB on our systems).

    Our tests indicate that sending 430K rps to the UDP port did not drop packets, but if there is a need for higher throughput, then tweaking /proc/sys/net/unix/max_dgram_qlen is recommended.

    "},{"location":"spectator/core/clock/","title":"Clock","text":"

    When taking measurements or working with timers it is recommended to use the Clock interface. It provides two methods for measuring time:

    "},{"location":"spectator/core/clock/#wall-time","title":"Wall Time","text":"

    This is what most users think of for time. It can be used to get the current time like what you would see on a wall clock. In most cases when not running in tests this will call System.currentTimeMillis().

    Note that the values returned by this method may not be monotonically increasing. Just like a clock on your wall, this value can go back in time or jump forward at unpredictable intervals, if someone sets the time. On many systems, ntpd or similar daemons will be constantly keeping the time synced up with an authoritative source.

    With Spectator, the Clock is typically accessed via the Registry.

    Java usage example:

    // Current time in milliseconds since the epoch\nlong currentTime = registry.clock().wallTime();\n
    "},{"location":"spectator/core/clock/#monotonic-time","title":"Monotonic Time","text":"

    While it is good in general for the wall clock to show the correct time, the unpredictable changes mean it is not a good choice for measuring how long an operation took. Consider a simple example of measuring request latency on a server:

    long start = registry.clock().wallTime();\nhandleRequest(request, response);\nlong end = registry.clock().wallTime();\nreqLatencyTimer.record(end - start, TimeUnit.MILLISECONDS);\n

    If ntp fixes the server time between start and end, then the recorded latency will be wrong. Spectator will protect against obviously wrong measurements like negative latencies by dropping those values when they are recorded. However, the change could incorrectly shorten or lengthen the measured latency.

    The clock interface also provides access to a monotonic source that is only useful for measuring elapsed time, for example:

    long start = registry.clock().monotonicTime();\nhandleRequest(request, response);\nlong end = registry.clock().monotonicTime();\nreqLatencyTimer.record(end - start, TimeUnit.NANOSECONDS);\n

    In most cases this will map to System.nanoTime(). Note the actual value returned is not meaningful unless compared with another sample to get a delta.

    "},{"location":"spectator/core/clock/#manual-clock","title":"Manual Clock","text":"

    If timing code is written to the Clock interface, then alternative implementations can be plugged-in. For test cases, it is common to use ManualClock so that tests can be reliable and fast without having to rely on hacks like sleep or assuming something will run in less than a certain amount of time.

    ManualClock clock = new ManualClock();\nRegistry registry = new DefaultRegistry(clock);\n\nTimer timer = registry.timer(\"test\");\ntimer.record(() -> {\n  doSomething();\n  clock.setMonotonicTime(42L);\n});\n\nAssert.assertEquals(timer.totalTime(), 42L);\n
    "},{"location":"spectator/core/meters/counter/","title":"Counter","text":"

    A Counter is used to measure the rate at which some event is occurring. Considering a simple queue, Counters could be used to measure things like the rate at which items are being inserted and removed.

    Counters are reported to the backend as a rate-per-second. This makes it much easier to reason about the measurement and allows for aggregating the counter across instances.

    In Atlas, the :per-step operator can be used to convert them back into a count-per-step on a graph.

    Note

    For high performance code, such as incrementing in a tight loop that lasts less than a reporting interval, increment a local variable and add the final value to the counter after the loop has completed.

    "},{"location":"spectator/core/meters/counter/#languages","title":"Languages","text":""},{"location":"spectator/core/meters/counter/#first-class-support","title":"First-Class Support","text":"
    • C++
    • Go
    • Java
    • Node.js
    • Python
    "},{"location":"spectator/core/meters/counter/#best-effort-support","title":"Best-Effort Support","text":"
    • Rust (internal library)
    "},{"location":"spectator/core/meters/dist-summary/","title":"Distribution Summary","text":"

    A Distribution Summary is used to track the distribution of events. It is similar to a Timer, but more general, in that the size does not have to be a period of time. For example, a distribution summary could be used to measure the payload sizes of requests hitting a server or the number of records returned from a query.

    It is recommended to always use base units when recording the data. So, if measuring the payload size use bytes, not kilobytes or some other unit. This allows the presentation layer for graphing to use either SI or IEC prefixes in a natural manner, and you do not need to consider the meaning of something like \"milli-milliseconds\".

    "},{"location":"spectator/core/meters/dist-summary/#querying","title":"Querying","text":"

    Note

    Distribution summaries report summarized statistics about the measurements for a time window including the totalAmount, count, max and totalOfSquares. If you were to simply query for the name of your distribution summary via

    nf.cluster,foo,:eq,\nname,http.req.payload.size,:eq,\n:and\n

    you would get a nonsense value that is the sum of the reported statistics.

    When querying the results of a distribution summary, either select one of the statistics above via a filter, or use one of the operators below to generate a useful response.

    "},{"location":"spectator/core/meters/dist-summary/#average-measurement-dist-avg","title":"Average Measurement (:dist-avg)","text":"

    To compute the average latency across an arbitrary group, use the :dist-avg function:

    nf.cluster,foo,:eq,\nname,http.req.payload.size,:eq,\n:and,\n:dist-avg,\n(,nf.asg,),:by\n
    "},{"location":"spectator/core/meters/dist-summary/#maximum-measurement-dist-max","title":"Maximum Measurement (:dist-max)","text":"

    To compute the maximum latency across a group, use :dist-max:

    nf.cluster,foo,:eq,\nname,http.req.payload.size,:eq,\n:and,\n:dist-max,\n(,nf.asg,),:by\n
    "},{"location":"spectator/core/meters/dist-summary/#standard-deviation-of-measurement-dist-stddev","title":"Standard Deviation of Measurement (:dist-stddev)","text":"

    To compute the standard deviation of measurements across all instances for a time interval:

    nf.cluster,foo,:eq,\nname,http.req.payload.size,:eq,\n:and,\n:dist-stddev\n
    "},{"location":"spectator/core/meters/dist-summary/#raw-statistics","title":"Raw Statistics","text":"

    Note that it is possible to plot the individual statistics by filtering on the statistic tag. If you choose to do so, note that the count, totalAmount and totalOfSquares are counters and thus reported as rates per second, while the max is reported as a gauge.

    "},{"location":"spectator/core/meters/dist-summary/#languages","title":"Languages","text":""},{"location":"spectator/core/meters/dist-summary/#first-class-support","title":"First-Class Support","text":"
    • C++
    • Go
    • Java
    • Node.js
    • Python
    "},{"location":"spectator/core/meters/dist-summary/#best-effort-support","title":"Best-Effort Support","text":"
    • Rust (internal library)
    "},{"location":"spectator/core/meters/gauge/","title":"Gauge","text":"

    A Gauge is a value that is sampled at some point in time. Typical examples for Gauges would be the size of a queue, or the number of threads in a running state. Since Gauges are not updated inline when a state change occurs, there is no information about what might have occurred between samples.

    Consider monitoring the behavior of a queue of tasks. If the data is being collected once a minute, then a Gauge for the size will show the size when it was sampled (a.k.a. last-write-wins). The size may have been much higher or lower at some point during the interval, but that is not known.

    "},{"location":"spectator/core/meters/gauge/#languages","title":"Languages","text":""},{"location":"spectator/core/meters/gauge/#first-class-support","title":"First-Class Support","text":"
    • C++
    • Go
    • Java
    • Node.js
    • Python
    "},{"location":"spectator/core/meters/gauge/#best-effort-support","title":"Best-Effort Support","text":"
    • Rust (internal library)
    "},{"location":"spectator/core/meters/timer/","title":"Timer","text":"

    A Timer is used to measure how long (in seconds) some event is taking. Timer measurements are typically short, less than 1 minute.

    A selection of specialized timers include:

    • LongTaskTimer - Periodically reports the time taken for a long running task (> 1 minute). See the Long Task Timer pattern for details.
    • PercentileTimer - Useful if percentile approximations are needed in addition to basic stats. See the Percentile Timer pattern for details.
    "},{"location":"spectator/core/meters/timer/#querying","title":"Querying","text":"

    Note

    Timers report summarized statistics about the measurements for a time window including the totalTime, count, max and totalOfSquares. If you were to simply query for the name of your timer via

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and\n

    you would get a nonsense value that is the sum of the reported statistics.

    When querying the results of a timer, use one of the operators below to generate a useful response.

    "},{"location":"spectator/core/meters/timer/#average-measurement-dist-avg","title":"Average Measurement (:dist-avg)","text":"

    To compute the average latency across an arbitrary group, use the :dist-avg function:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-avg,\n(,nf.asg,),:by\n
    "},{"location":"spectator/core/meters/timer/#maximum-measurement-dist-max","title":"Maximum Measurement (:dist-max)","text":"

    To compute the maximum latency across a group, use :dist-max:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-max,\n(,nf.asg,),:by\n
    "},{"location":"spectator/core/meters/timer/#standard-deviation-of-measurement-dist-stddev","title":"Standard Deviation of Measurement (:dist-stddev)","text":"

    To compute the standard deviation of measurements across all instances for a time interval:

    nf.cluster,foo,:eq,\nname,http.req.latency,:eq,\n:and,\n:dist-stddev\n
    "},{"location":"spectator/core/meters/timer/#raw-statistics","title":"Raw Statistics","text":"

    Note that it is possible to plot the individual statistics by filtering on the statistic tag. If you choose to do so, note that the count, totalTime and totalOfSquares are counters and thus reported as rates per second, while the max is reported as a gauge.

    "},{"location":"spectator/core/meters/timer/#languages","title":"Languages","text":""},{"location":"spectator/core/meters/timer/#first-class-support","title":"First-Class Support","text":"
    • C++
    • Go
    • Java
    • Node.js
    • Python
    "},{"location":"spectator/core/meters/timer/#best-effort-support","title":"Best-Effort Support","text":"
    • Rust (internal library)
    "},{"location":"spectator/lang/overview/","title":"Overview","text":"

    The original Spectator library was written in Java, with the first stable version (0.35.0) released on Jan 18, 2016. Since then, there has been a proliferation of languages at Netflix which seek first-class observability support.

    After some thought and experimentation, we have settled on a strategy of developing minimal Spectator implementations in many languages, which function as thin clients that send data to Atlas. Our goal is to have partners invested in each experimental language who will provide the necessary expertise to develop idiomatic solutions, deliver real-world feedback on library usage, and shoulder some of the support and maintenance burden.

    We think this is a more sustainable path over the long-term than expanding our team to support N different languages for this singular polyglot use case.

    "},{"location":"spectator/lang/overview/#first-class-support","title":"First-Class Support","text":"

    These libraries are fully-supported by the team and see wide use across Netflix. Issues are fixed in a timely manner and updates are published regularly.

    • C++
    • Go
    • Java
    • Node.js
    • Python
    "},{"location":"spectator/lang/overview/#best-effort-support","title":"Best-Effort Support","text":"
    • Rust (internal library)
    "},{"location":"spectator/lang/cpp/usage/","title":"spectator-cpp Usage","text":"

    C++ thin-client metrics library for use with Atlas and SpectatorD.

    "},{"location":"spectator/lang/cpp/usage/#instrumenting-code","title":"Instrumenting Code","text":"
    #include <spectator/registry.h>\n\n// use default values\nstatic constexpr auto kDefault = 0;\n\nstruct Request {\n  std::string country;\n};\n\nstruct Response {\n  int status;\n  int size;\n};\n\nclass Server {\n public:\n  explicit Server(spectator::Registry* registry)\n      : registry_{registry},\n        request_count_id_{registry->CreateId(\"server.requestCount\", spectator::Tags{})},\n        request_latency_{registry->GetTimer(\"server.requestLatency\")},\n        response_size_{registry->GetDistributionSummary(\"server.responseSizes\")} {}\n\n  Response Handle(const Request& request) {\n    auto start = std::chrono::steady_clock::now();\n\n    // do some work and obtain a response...\n    Response res{200, 64};\n\n    // Update the Counter id with dimensions, based on information in the request. The Counter\n    // will be looked up in the Registry, which is a fairly cheap operation, about the same as\n    // the lookup of an id object in a map. However, it is more expensive than having a local\n    // variable set to the Counter.\n    auto cnt_id = request_count_id_\n        ->WithTag(\"country\", request.country)\n        ->WithTag(\"status\", std::to_string(res.status));\n    registry_->GetCounter(std::move(cnt_id))->Increment();\n    request_latency_->Record(std::chrono::steady_clock::now() - start);\n    response_size_->Record(res.size);\n    return res;\n  }\n\n private:\n  spectator::Registry* registry_;\n  std::shared_ptr<spectator::Id> request_count_id_;\n  std::shared_ptr<spectator::Timer> request_latency_;\n  std::shared_ptr<spectator::DistributionSummary> response_size_;\n};\n\nRequest get_next_request() {\n  return Request{\"US\"};\n}\n\nint main() {\n  auto logger = spdlog::stdout_color_mt(\"console\"); \n  std::unordered_map<std::string, std::string> common_tags('xatlas.process', 'some-sidecar');\n  spectator::Config cfg{\"unix:/run/spectatord/spectatord.unix\", common_tags};\n  spectator::Registry registry{std::move(cfg), logger);\n\n  
Server server{&registry};\n\n  for (auto i = 1; i <= 3; ++i) {\n    // get a request\n    auto req = get_next_request();\n    server.Handle(req);\n  }\n}\n
    "},{"location":"spectator/lang/cpp/usage/#high-volume-publishing","title":"High-Volume Publishing","text":"

    By default, the library sends every meter change to the spectatord sidecar immediately. This involves a blocking send call and underlying system calls, and may not be the most efficient way to publish metrics in high-volume use cases. For this purpose, a simple buffering functionality is implemented in Publisher, and it can be turned on by passing a buffer size to the spectator::Config constructor. It is important to note that, until this buffer fills up, the Publisher will not send any meters to the sidecar. Therefore, if your application doesn't emit meters at a high rate, you should either keep the buffer very small, or do not configure a buffer size at all, which will fall back to the \"publish immediately\" mode of operation.

    "},{"location":"spectator/lang/go/migrations/","title":"Migrations","text":""},{"location":"spectator/lang/go/migrations/#migrating-from-0x-to-2x","title":"Migrating from 0.X to 2.X","text":"

    Version 2.X consists of a major rewrite that turns spectator-go into a thin client designed to send metrics through spectatord. As a result some functionality has been moved to other packages or removed.

    "},{"location":"spectator/lang/go/migrations/#new","title":"New","text":""},{"location":"spectator/lang/go/migrations/#writers","title":"Writers","text":"

    spectator.Registry now supports different writers. The default writer is writer.UdpWriter which sends metrics to spectatord through UDP.

    Writers can be configured through spectator.Config.Location.

    Possible values are:

    • none: Configures a no-op writer that does nothing. Can be used to disable metrics collection.
    • stdout: Writes metrics to stdout.
    • stderr: Writes metrics to stderr.
    • memory: Writes metrics to memory. Useful for testing.
    • file:///path/to/file: Writes metrics to a file.
    • unix:///path/to/socket: Writes metrics to a Unix domain socket.
    • udp://host:port: Writes metrics to a UDP socket.

    Location can also be set through the environment variable SPECTATOR_OUTPUT_LOCATION. If both are set, the environment variable takes precedence over the passed config.

    The environment variable SPECTATOR_OUTPUT_LOCATION can be set to none to disable metrics collection.

    "},{"location":"spectator/lang/go/migrations/#meters","title":"Meters","text":"

    The following new Meters have been added:

    • meter.MaxGauge
    • meter.Gauge with TTL
    "},{"location":"spectator/lang/go/migrations/#common-tags","title":"Common Tags","text":"

    Common tags are now automatically added to all Meters. Their values are read from the environment variables.

    Tag Environment Variable nf.container TITUS_CONTAINER_NAME nf.process NETFLIX_PROCESS_NAME

    Tags from environment variables take precedence over tags passed in code when creating the Config.

    Note that common tags sourced by spectatord can't be overwritten.

    "},{"location":"spectator/lang/go/migrations/#config","title":"Config","text":"
    • Config is now created through a constructor which throws error if the passed in parameters are not valid.
    • Config members are now private.
    "},{"location":"spectator/lang/go/migrations/#moved","title":"Moved","text":"
    • Runtime metrics collection has been moved to spectator-go-runtime-metrics. Follow instructions in the README to enable collection.
    • Some types have been moved to different packages. For example, spectator.Counter is now in meter.Counter.
    "},{"location":"spectator/lang/go/migrations/#removed","title":"Removed","text":"
    • spectator.HttpClient has been removed. Use the standard http.Client instead.
    • spectator.Meters no longer has a Measure() []Measurement function. Meters are now stateless and do not store measurements.
    • spectator.Clock has been removed. Use the standard time package instead.
    • spectator.Config has been greatly simplified.
    • spectator.Registry no longer has a Start() function. The Registry is now effectively stateless and there is nothing to start other than opening the output location.
    • spectator.Registry no longer has a Stop() function. Instead, use Close() to close the registry. Once the registry is closed, it can't be started again.
    • spectator.Config.IpcTimerRecord has been removed. Use a meter.Timer instead to record Ipc metrics.
    • spectator.MeterFactoryFun has been removed. If you need to create a custom meter you can do so by wrapping one of the meters returned by spectator.Registry.
    • spectator.Registry no longer reports spectator.measurements metrics. Instead, you can use spectatord metrics to troubleshoot.
    • spectator.Registry no longer keeps track of the Meters it creates. This means that you can't get a list of all Meters from the Registry. If you need to keep track of Meters, you can do so in your application code.
    • Percentile* meters no longer support defining min/max values.
    • spectator.Registry no longer allows setting a different logger after creation. A custom logger can be set in the spectator.Config before creating the Registry.
    • File-based configuration is no longer supported.
    "},{"location":"spectator/lang/go/migrations/#migration-steps","title":"Migration Steps","text":"
    1. Make sure you're not relying on any of the removed functionality.
    2. Update imports to use meters package instead of spectator for Meters.
    3. If you want to collect runtime metrics pull spectator-go-runtime-metrics and follow the instructions in the README.
    4. If you use PercentileDistributionSummary or PercentileTimer, then you need to update your code to use the respective functions provided by the Registry to initialize these meters.
    5. Remove dependency on Spectator Go Internal configuration library. Such dependency is no longer required.
    6. There is no longer an option to start or stop the registry at runtime. If you need to configure a Registry that doesn't emit metrics, for testing purposes, you can use the spectator.Config.Location option with none to configure a no-op writer.
    "},{"location":"spectator/lang/go/migrations/#writing-tests","title":"Writing Tests","text":"

    To write tests against this library, instantiate a test instance of the Registry and configure it to use the MemoryWriter, which stores all updates in an Array. Maintain a handle to the MemoryWriter, then inspect the Lines() to verify your metrics updates. See the source code for more testing examples.

    package app\n\nimport (\n    \"fmt\"\n    \"github.com/Netflix/spectator-go/v2/spectator/logger\"\n    \"github.com/Netflix/spectator-go/v2/spectator/writer\"\n    \"testing\"\n    \"time\"\n)\n\nfunc TestRegistryWithMemoryWriter_Counter(t *testing.T) {\n    mw := &writer.MemoryWriter{}\n    r := NewTestRegistry(mw)\n\n    counter := r.Counter(\"test_counter\", nil)\n    counter.Increment()\n    expected := \"c:test_counter:1\"\n    if len(mw.Lines()) != 1 || mw.Lines()[0] != expected {\n        t.Errorf(\"Expected '%s', got '%s'\", expected, mw.Lines()[0])\n    }\n}\n\nfunc NewTestRegistry(mw *writer.MemoryWriter) Registry {\n    return &spectatordRegistry{\n        config: &Config{},\n        writer: mw,\n        logger: logger.NewDefaultLogger(),\n    }\n}\n
    "},{"location":"spectator/lang/go/usage/","title":"spectator-go Usage","text":"

    Go thin-client metrics library for use with Atlas and SpectatorD.

    "},{"location":"spectator/lang/go/usage/#supported-go-versions","title":"Supported Go Versions","text":"

    This library currently targets the latest two stable versions of Go.

    There is one language feature used in the project which requires at least 1.21 - the log/slog structured logging library.

    "},{"location":"spectator/lang/go/usage/#instrumenting-code","title":"Instrumenting Code","text":"
    package main\n\nimport (\n    \"github.com/Netflix/spectator-go/v2/spectator\"\n    \"github.com/Netflix/spectator-go/v2/spectator/meter\"\n    \"strconv\"\n    \"time\"\n)\n\ntype Server struct {\n    registry       spectator.Registry\n    requestCountId *meter.Id\n    requestLatency *meter.Timer\n    responseSizes  *meter.DistributionSummary\n}\n\ntype Request struct {\n    country string\n}\n\ntype Response struct {\n    status int\n    size   int64\n}\n\nfunc (s *Server) Handle(request *Request) (res *Response) {\n    start := time.Now()\n\n    // initialize response\n    res = &Response{200, 64}\n\n    // Update the counter with dimensions based on the request.\n    tags := map[string]string{\n        \"country\": request.country,\n        \"status\":  strconv.Itoa(res.status),\n    }\n    requestCounterWithTags := s.requestCountId.WithTags(tags)\n    counter := s.registry.CounterWithId(requestCounterWithTags)\n    counter.Increment()\n\n    // ...\n    s.requestLatency.Record(time.Since(start))\n    s.responseSizes.Record(res.size)\n    return\n}\n\nfunc newServer(registry spectator.Registry) *Server {\n    return &Server{\n        registry,\n        registry.NewId(\"server.requestCount\", nil),\n        registry.Timer(\"server.requestLatency\", nil),\n        registry.DistributionSummary(\"server.responseSizes\", nil),\n    }\n}\n\nfunc getNextRequest() *Request {\n    // ...\n    return &Request{\"US\"}\n}\n\nfunc main() {\n    commonTags := map[string]string{\"nf.platform\": \"my_platform\", \"process_name\": \"my_process\"}\n    // if desired, replace the logger with a custom one, using the third parameter here:\n    config, _ := spectator.NewConfig(\"\", commonTags, nil)\n\n    registry, _ := spectator.NewRegistry(config)\n    defer registry.Close()\n\n    server := newServer(registry)\n\n    for i := 1; i < 3; i++ {\n        // get a request\n        req := getNextRequest()\n        server.Handle(req)\n    }\n}\n
    "},{"location":"spectator/lang/go/usage/#logging","title":"Logging","text":"

    Logging is implemented with the standard Golang slog package. The logger defines interfaces for Debugf, Infof, and Errorf. There are useful messages implemented at the Debug level which can help diagnose the metric publishing workflow. The logger can be overridden by providing one as the third parameter of the Config constructor.

    "},{"location":"spectator/lang/go/usage/#runtime-metrics","title":"Runtime Metrics","text":"

    Use spectator-go-runtime-metrics. Follow instructions in the README to enable collection.

    "},{"location":"spectator/lang/java/servo-migration/","title":"Servo Migration","text":""},{"location":"spectator/lang/java/servo-migration/#servo-comparison","title":"Servo Comparison","text":"

    Servo is an alternative client monitoring library that is also developed by Netflix. Originally, Spectator was an experiment for a simpler API that wrapped Servo. It was done as a separate project to avoid breaking backwards compatibility for Servo.

    From a user perspective, both will be supported for a long time, but most of our efforts for future improvement will go to Spectator. For new code, it is recommended to use the spectator API. If running at Netflix, the correct bindings will be in place for both Servo and Spectator.

    "},{"location":"spectator/lang/java/servo-migration/#differences","title":"Differences","text":"

    This section provides a quick summary of the differences between Spectator and Servo.

    "},{"location":"spectator/lang/java/servo-migration/#simpler-api","title":"Simpler API","text":"

    Servo gives the user a lot of control, but this makes it hard to use correctly. For example, to create a Counter, the user needs to understand the trade-offs and choose between:

    • BasicCounter
    • DynamicCounter
    • ContextualCounter
    • StepCounter

    Further, each of these can impact how data is reported to observers. The Spectator API focuses on the constructs a user needs to instrument the code. In Spectator, the user would always use the Registry to create a Counter. The implementation details are left up to the Registry.

    The registration is simpler as well to avoid common pitfalls when using Servo like overwriting a registered object.

    "},{"location":"spectator/lang/java/servo-migration/#more-focused","title":"More Focused","text":"

    The goal of Spectator is instrumenting code to send to a dimensional time-series system like Atlas. Servo has goals of staying compatible with a number of legacy libraries and naming formats, exposing data to JMX, etc. Examples of how this influences decisions:

    • No support for non-numeric data. Servo supported this feature, so that it can expose data to JMX. Exposing the numeric data registered in Spectator to JMX can be done using a registry that supports it, but there is no goal to be a general interface for exposing arbitrary data in JMX.
    • No support for custom time units when reporting timer data. Base units should always be used for reporting and conversions can be performed in the presentation layer, if needed. It also avoids a lot of the confusion around the timer unit for the data and issues like creating aggregates that are meaningless due to mixed units.

    It is better to have a simple way to send correct and easy-to-understand data to the backend than many options. If you want more knobs, then you can use Servo.

    "},{"location":"spectator/lang/java/servo-migration/#di-friendly","title":"DI Friendly","text":"

    When Servo was originally written, dependency injection (DI) was not heavily used at Netflix. Further, Servo needed to stay compatible with a number of use-cases that were heavily static.

    While Spectator does have a static registry that can be used, the recommended way is to create a registry and inject it either manually or via a framework into the classes that need it. This also makes it much easier to test in isolation.

    "},{"location":"spectator/lang/java/servo-migration/#migration","title":"Migration","text":"

    If you want to migrate from the Servo API to the Spectator API, then this section provides some guides on how Servo constructs can be ported over. The sub-sections are the class names of monitor types supported by Servo.

    For users at Netflix, we are not actively pushing teams to migrate or do any additional work. Servo is still supported and if it works for your use-case, then feel free to continue using it.

    "},{"location":"spectator/lang/java/servo-migration/#registration","title":"Registration","text":"

    First read through the Servo docs on registration. With Servo, say you have a class like the following:

    public class Foo {\n\n  private AtomicInteger gauge;\n  private Counter counter;\n\n  public Foo(String id) {\n    gauge = new AtomicInteger();\n    counter = new BasicCounter(MonitorConfig.builder(\"counter\").build());\n    Monitors.registerObject(id, this);\n  }\n\n  @Monitor(name = \"gauge\", type = DataSourceType.GAUGE)\n  private int gauge() {\n    return gauge.get();\n  }\n\n  public void doSomething() {\n    ...\n  }\n}\n

    The state of the class is in the member variables of an instance of Foo. If multiple instances of class Foo are created with the same value for id, then the last one will overwrite the others for the registration. So the values getting reported will only be from the last instance registered. Also the registry has a reference to the instance of Foo, so it will never go away.

    For Counters and Timers, one way to get around this is to use DynamicCounter and DynamicTimer, respectively. Those classes will automatically handle the registration and expire if there is no activity. They also get used for cases where the set of dimensions is not known up front.

    Gauges need to sample the state of something, so they need to have a reference to an object that contains the state. So the user would need to ensure that only a single copy was registered leading to patterns like:

    class Foo {\n\n  private static class FooStats {\n\n    private AtomicInteger gauge;\n    private Counter counter;\n\n    public FooStats(String id) {\n      gauge = new AtomicInteger();\n      counter = new BasicCounter(MonitorConfig.builder(\"counter\").build());\n      Monitors.registerObject(id, this);\n    }\n\n    @Monitor(name = \"gauge\", type = DataSourceType.GAUGE)\n    private int gauge() {\n      return gauge.get();\n    }\n  }\n\n  private static ConcurrentHashMap<String, FooStats> STATS =\n    new ConcurrentHashMap<>();\n\n  private final FooStats stats;\n\n  public Foo(String id) {\n    stats = STATS.computeIfAbsent(id, (i) -> new FooStats(i));\n  }\n\n  public void doSomething() {\n    ...\n    stats.update();\n  }\n}\n

    This ensures that there is a single copy for a given id. In spectator this example would look like:

    public class Foo {\n\n  private AtomicInteger gauge;\n  private Counter counter;\n\n  public Foo(Registry registry, String id) {\n    Id gaugeId = registry.createId(\"gauge\").withTag(\"id\", id);\n    gauge = registry.gauge(gaugeId, new AtomicInteger());\n    counter = registry.counter(\"counter\", \"id\", id);\n  }\n\n  public void doSomething() {\n    ...\n  }\n}\n

    Everything using the same Registry will get the same Counter instance, if the same id is used. For the Gauge, the Registry will keep a weak reference and will sum the values if multiple instances are present. Since it is a weak reference, nothing will prevent an instance of Foo from getting garbage collected.

    "},{"location":"spectator/lang/java/servo-migration/#annotations","title":"Annotations","text":"

    Annotations are not supported, use the appropriate meter type:

    DataSourceType Spectator Alternative COUNTER Counter Usage GAUGE Gauge Usage INFORMATIONAL Not supported"},{"location":"spectator/lang/java/servo-migration/#basiccounter","title":"BasicCounter","text":"

    See the general overview of registration differences and summary of Counter usage.

    Servo:

    public class Foo {\n  private final Counter c =\n    new BasicCounter(MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n\n  public void doSomething() {\n    c.increment();\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Counter c;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    c = registry.counter(\"name\", \"id\", id);\n  }\n\n  public void doSomething() {\n    c.increment();\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#basicgauge","title":"BasicGauge","text":"

    See the general overview of registration differences and summary of Gauge usage.

    Servo:

    public class Foo {\n  private final BasicGauge g = new BasicGauge(\n    MonitorConfig.builder(\"name\").build(),\n    this::getCurrentValue);\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n}\n

    Spectator:

    public class Foo {\n  @Inject\n  public Foo(Registry registry, String id) {\n    Id gaugeId = registry.createId(\"name\").withTag(\"id\", id);\n    registry.gauge(gaugeId, this, Foo::getCurrentValue);\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#basictimer","title":"BasicTimer","text":"

    See the general overview of registration differences and summary of Timer usage. In Spectator, the reported unit for Timers is always seconds and cannot be changed. Seconds is the base unit and other units should only be used as a presentation detail. Servo allows the unit to be customized and defaults to milliseconds.

    Servo:

    public class Foo {\n  private final Timer t = new BasicTimer(\n    MonitorConfig.builder(\"name\").build(), TimeUnit.SECONDS);\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n\n  public void doSomething() {\n    Stopwatch s = t.start();\n    try {\n      ...\n    } finally {\n      s.stop();\n    }\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Timer t;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    t = registry.timer(\"name\", \"id\", id);\n  }\n\n  public void doSomething() {\n    t.record(() -> {\n      ...\n    });\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#basicdistributionsummary","title":"BasicDistributionSummary","text":"

    See the general overview of registration differences and summary of Distribution Summary usage.

    Servo:

    public class Foo {\n  private final BasicDistributionSummary s = new BasicDistributionSummary(\n    MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n\n  public void doSomething() {\n    ...\n    s.record(getValue());\n  }\n}\n

    Spectator:

    public class Foo {\n  private final DistributionSummary s;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    s = registry.distributionSummary(\"name\", \"id\", id);\n  }\n\n  public void doSomething() {\n    ...\n    s.record(getValue());\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#basicinformational","title":"BasicInformational","text":"

    Not supported, see the overview of differences.

    "},{"location":"spectator/lang/java/servo-migration/#basicstopwatch","title":"BasicStopwatch","text":"

    There isn't an explicit stopwatch class in Spectator. Use a timing call directly.

    Servo:

      public void doSomething() {\n    Stopwatch s = timer.start();\n    try {\n      ...\n    } finally {\n      s.stop();\n    }\n  }\n

    Spectator:

      public void doSomething() {\n    final long s = System.nanoTime();\n    try {\n      ...\n    } finally {\n      timer.record(System.nanoTime() - s, TimeUnit.NANOSECONDS);\n    }\n  }\n
    "},{"location":"spectator/lang/java/servo-migration/#buckettimer","title":"BucketTimer","text":"

    See the general overview of registration differences.

    Servo:

    public class Foo {\n  private final Timer t = new BucketTimer(\n    MonitorConfig.builder(\"name\").build(),\n    new BucketConfig.Builder()\n      .withTimeUnit(TimeUnit.MILLISECONDS)\n      .withBuckets(new long[] { 500, 2500, 5000, 10000 })\n      .build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n\n  public void doSomething() {\n    Stopwatch s = t.start();\n    try {\n      ...\n    } finally {\n      s.stop();\n    }\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Timer t;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    Id timerId = registry.createId(\"name\", \"id\", id);\n    BucketFunction f = BucketFunctions.latency(10, TimeUnit.SECONDS);\n    t = BucketTimer.get(registry, timerId, f);\n  }\n\n  public void doSomething() {\n    t.record(() -> {\n      ...\n    });\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#contextualcounter","title":"ContextualCounter","text":"

    Not supported. A fixed tag list for the context is too rigid and this class was never used much at Netflix. Future work being looked at in issue-180.

    "},{"location":"spectator/lang/java/servo-migration/#contextualtimer","title":"ContextualTimer","text":"

    Not supported. A fixed tag list for the context is too rigid and this class was never used much at Netflix. Future work being looked at in issue-180.

    "},{"location":"spectator/lang/java/servo-migration/#doublegauge","title":"DoubleGauge","text":"

    See the general overview of registration differences and summary of Gauge usage.

    Servo:

    public class Foo {\n  private final DoubleGauge g = new DoubleGauge(\n    MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n}\n

    Spectator:

    import com.google.common.util.concurrent.AtomicDouble;\n\npublic class Foo {\n  private final AtomicDouble v;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    Id gaugeId = registry.createId(\"name\").withTag(\"id\", id);\n    v = registry.gauge(gaugeId, new AtomicDouble());\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#durationtimer","title":"DurationTimer","text":"

    See the general overview of registration differences, the summary of Timer usage, and Long Task Timer usage.

    Servo:

    public class Foo {\n  private final DurationTimer t = new DurationTimer(\n    MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n}\n

    Spectator:

    public class Foo {\n  private final LongTaskTimer t;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    t = registry.longTaskTimer(\"name\", \"id\", id);\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#dynamiccounter","title":"DynamicCounter","text":"

    See the general overview of registration differences and summary of Counter usage.

    Servo:

    public class Foo {\n\n  private final String id;\n\n  public Foo(String id) {\n    this.id = id;\n  }\n\n  public void doSomething(Context ctxt) {\n    DynamicCounter.increment(\"staticId\", \"id\", id);\n    DynamicCounter.increment(\"dynamicId\", \"id\", id, \"foo\", ctxt.getFoo());\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Registry registry;\n  private final String id;\n  private final Counter staticCounter;\n  private final Id dynamicId;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    this.registry = registry;\n    this.id = id;\n    staticCounter = registry.counter(\"staticId\", \"id\", id);\n    dynamicId = registry.createId(\"dynamicId\", \"id\", id);\n  }\n\n  public void doSomething(Context ctxt) {\n    // Keeping the reference to the counter avoids additional allocations\n    // to create the id object and the lookup cost\n    staticCounter.increment();\n\n    // If the id is dynamic it must be looked up\n    registry.counter(\"dynamicId\", \"id\", id, \"foo\", ctxt.getFoo()).increment();\n\n    // This will update the same counter as the line above, but the base part\n    // of the id is precomputed to make it cheaper to construct the id.\n    registry.counter(dynamicId.withTag(\"foo\", ctxt.getFoo())).increment();\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#dynamictimer","title":"DynamicTimer","text":"

    See the general overview of registration differences and summary of Timer usage.

    Servo:

    public class Foo {\n\n  private final String id;\n  private final MonitorConfig staticId;\n\n  public Foo(String id) {\n    this.id = id;\n    staticId = MonitorConfig.builder(\"staticId\").withTag(\"id\", id).build();\n  }\n\n  public void doSomething(Context ctxt) {\n    final long d = ctxt.getDurationMillis();\n    DynamicTimer.record(staticId, TimeUnit.SECONDS, d, TimeUnit.MILLISECONDS);\n\n    MonitorConfig dynamicId = MonitorConfig.builder(\"dynamicId\")\n      .withTag(\"id\", id)\n      .withTag(\"foo\", ctxt.getFoo())\n      .build();\n    DynamicTimer.record(dynamicId, TimeUnit.SECONDS, d, TimeUnit.MILLISECONDS);\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Registry registry;\n  private final String id;\n  private final Timer staticTimer;\n  private final Id dynamicId;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    this.registry = registry;\n    this.id = id;\n    staticTimer = registry.timer(\"staticId\", \"id\", id);\n    dynamicId = registry.createId(\"dynamicId\", \"id\", id);\n  }\n\n  public void doSomething(Context ctxt) {\n    final long d = ctxt.getDurationMillis();\n\n    // Keeping the reference to the timer avoids additional allocations\n    // to create the id object and the lookup cost\n    staticTimer.record(d, TimeUnit.MILLISECONDS);\n\n    // If the id is dynamic it must be looked up\n    registry.timer(\"dynamicId\", \"id\", id, \"foo\", ctxt.getFoo())\n      .record(d, TimeUnit.MILLISECONDS);\n\n    // This will update the same timer as the line above, but the base part\n    // of the id is precomputed to make it cheaper to construct the id.\n    registry.timer(dynamicId.withTag(\"foo\", ctxt.getFoo()))\n      .record(d, TimeUnit.MILLISECONDS);\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#longgauge","title":"LongGauge","text":"

    See the general overview of registration differences and summary of Gauge usage.

    Servo:

    public class Foo {\n  private final LongGauge g = new LongGauge(\n    MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n}\n

    Spectator:

    public class Foo {\n  private final AtomicLong v;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    Id gaugeId = registry.createId(\"name\").withTag(\"id\", id);\n    v = registry.gauge(gaugeId, new AtomicLong());\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#monitorconfig","title":"MonitorConfig","text":"

    See the documentation on naming.

    Servo:

    MonitorConfig id = MonitorConfig.builder(\"name\")\n  .withTag(\"country\", \"US\")\n  .withTag(\"device\",  \"xbox\")\n  .build();\n

    Spectator:

    Id id = registry.createId(\"name\")\n  .withTag(\"country\", \"US\")\n  .withTag(\"device\",  \"xbox\");\n\n// or\n\nId id = registry.createId(\"name\", \"country\", \"US\", \"device\", \"xbox\");\n
    "},{"location":"spectator/lang/java/servo-migration/#monitoredcache","title":"MonitoredCache","text":"

    Not supported because Spectator does not have a direct dependency on Guava. If there is enough demand, an extension can be created.

    "},{"location":"spectator/lang/java/servo-migration/#numbergauge","title":"NumberGauge","text":"

    See the general overview of registration differences and summary of gauge usage.

    Servo:

    public class Foo {\n  private final NumberGauge g = new NumberGauge(\n    MonitorConfig.builder(\"name\").build(), new AtomicLong());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n}\n

    Spectator:

    public class Foo {\n  private final AtomicLong v;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    Id gaugeId = registry.createId(\"name\").withTag(\"id\", id);\n    v = registry.gauge(gaugeId, new AtomicLong());\n  }\n}\n
    "},{"location":"spectator/lang/java/servo-migration/#statstimer","title":"StatsTimer","text":"

    Not supported, see overview of differences.

    "},{"location":"spectator/lang/java/servo-migration/#stepcounter","title":"StepCounter","text":"

    See the general overview of registration differences and summary of Counter usage.

    Servo:

    public class Foo {\n  private final Counter c =\n    new StepCounter(MonitorConfig.builder(\"name\").build());\n\n  public Foo(String id) {\n    Monitors.registerObject(id, this);\n  }\n\n  public void doSomething() {\n    c.increment();\n  }\n}\n

    Spectator:

    public class Foo {\n  private final Counter c;\n\n  @Inject\n  public Foo(Registry registry, String id) {\n    c = registry.counter(\"name\", \"id\", id);\n  }\n\n  public void doSomething() {\n    c.increment();\n  }\n}\n
    "},{"location":"spectator/lang/java/testing/","title":"Testing","text":"

    Testing should be relatively straightforward if you are using injection for the Registry. Consider a sample class:

    public class Foo {\n\n  private final Counter counter;\n\n  @Inject\n  public Foo(Registry registry) {\n    counter = registry.counter(\"foo\");\n  }\n\n  public void doSomething() {\n    counter.increment();\n  }\n}\n

    Tests will typically want to use an isolated instance of the DefaultRegistry.

    "},{"location":"spectator/lang/java/testing/#simple-test","title":"Simple Test","text":"

    A basic standalone test class would look something like:

    public class FooTest {\n\n  private Registry registry;\n  private Foo foo;\n\n  @Before\n  public void init() {\n    registry = new DefaultRegistry();\n    foo = new Foo(registry);\n  }\n\n  @Test\n  public void doSomething() {\n    foo.doSomething();\n    Assert.assertEquals(1, registry.counter(\"foo\").count());\n  }\n}\n
    "},{"location":"spectator/lang/java/testing/#spring-test","title":"Spring Test","text":"

    If using Spring, then you can create a binding for the DefaultRegistry, for example:

    public class FooTest {\n\n  private Registry registry;\n  private Foo foo;\n\n  @Configuration\n  public static class TestConfiguration {\n    @Bean\n    public Registry registry() {\n      return new DefaultRegistry();\n    }\n\n    @Bean\n    public Foo foo(Registry registry) {\n      return new Foo(registry);\n    }\n  }\n\n  private AnnotationConfigApplicationContext createContext() {\n    AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();\n    context.register(TestConfiguration.class);\n    context.refresh();\n    return context;\n  }\n\n  @Test\n  public void doSomething() {\n    try (AnnotationConfigApplicationContext context = createContext()) {\n      Foo foo = context.getBean(Foo.class);\n      foo.doSomething();\n\n      Registry registry = context.getBean(Registry.class);\n      Assert.assertEquals(1, registry.counter(\"foo\").count());\n    }\n  }\n}\n
    "},{"location":"spectator/lang/java/testing/#exceptions","title":"Exceptions","text":"

    By default, for most user errors Spectator will log a warning rather than throw an exception. The rationale is that users do not often think about instrumentation and logging code causing an exception and interrupting the control flow of a program. However, for test cases it is recommended to be more aggressive and learn about problems as early as possible. This can be done by setting a system property:

    spectator.api.propagateWarnings=true\n

    Consider an example:

    private static final Id RARE_EXCEPTION_ID = null;\n\npublic void doSomethingImportant() {\n  try {\n    ... do work ...\n  } catch (RareException e) {\n    // There is a bug in the program, an Id is not allowed to be null. In production we do\n    // not want it to throw and interrupt the control flow. Instrumentation should gracefully\n    // degrade.\n    registry.counter(RARE_EXCEPTION_ID).increment();\n\n    // These statements are important to provide context for operating the system\n    // and to ensure the app continues to function properly.\n    LOGGER.error(\"important context for user\", e);\n    properlyHandleException(e);\n  }\n}\n
    "},{"location":"spectator/lang/java/usage/","title":"Usage","text":""},{"location":"spectator/lang/java/usage/#project","title":"Project","text":"
    • Source
    • Javadoc
    • Product Lifecycle: GA
    • Requirements: Java >= 8
    "},{"location":"spectator/lang/java/usage/#install-library","title":"Install Library","text":"
    1. Depend on the API library, which is available in Maven Central. The only transitive dependency is slf4j. For Gradle, the dependency is specified as follows:

      dependencies {\n    compile \"com.netflix.spectator:spectator-api:0.101.0\"\n}\n
    2. Pick a Registry to bind, when initializing the application.

    3. If running at Netflix, see the Netflix Integration section.

    "},{"location":"spectator/lang/java/usage/#instrumenting-code","title":"Instrumenting Code","text":"

    Suppose we have a server and we want to keep track of:

    • Number of requests received with dimensions for breaking down by status code, country, and the exception type if the request fails in an unexpected way.
    • Latency for handling requests.
    • Summary of the response sizes.
    • Current number of active connections on the server.

    Here is some sample code that does that:

    // In the application initialization setup a registry\nRegistry registry = new DefaultRegistry();\nServer s = new Server(registry);\n\npublic class Server {\n  private final Registry registry;\n  private final Id requestCountId;\n  private final Timer requestLatency;\n  private final DistributionSummary responseSizes;\n\n  @Inject\n  public Server(Registry registry) {\n    this.registry = registry;\n\n    // Create a base id for the request count. The id will get refined with\n    // additional dimensions when we receive a request.\n    requestCountId = registry.createId(\"server.requestCount\");\n\n    // Create a timer for tracking the latency. The reference can be held onto\n    // to avoid additional lookup cost in critical paths.\n    requestLatency = registry.timer(\"server.requestLatency\");\n\n    // Create a distribution summary meter for tracking the response sizes.\n    responseSizes = registry.distributionSummary(\"server.responseSizes\");\n\n    // Gauge type that can be sampled. In this case it will invoke the\n    // specified method via reflection to get the value. The registry will\n    // keep a weak reference to the object passed in so that registration will\n    // not prevent garbage collection of the server object.\n    registry.methodValue(\"server.numConnections\", this, \"getNumConnections\");\n  }\n\n  public Response handle(Request req) {\n    final long s = System.nanoTime();\n    requestLatency.record(() -> {\n      try {\n        Response res = doSomething(req);\n\n        // Update the counter id with dimensions based on the request. 
    The\n        // counter will then be looked up in the registry which should be\n        // fairly cheap, such as lookup of id object in a ConcurrentHashMap.\n        // However, it is more expensive than having a local variable set\n        // to the counter.\n        final Id cntId = requestCountId\n          .withTag(\"country\", req.country())\n          .withTag(\"status\", res.status());\n        registry.counter(cntId).increment();\n\n        responseSizes.record(res.body().size());\n\n        return res;\n      } catch (Exception e) {\n        final Id cntId = requestCountId\n          .withTag(\"country\", req.country())\n          .withTag(\"status\", \"exception\")\n          .withTag(\"error\", e.getClass().getSimpleName());\n        registry.counter(cntId).increment();\n        throw e;\n      }\n    });\n  }\n\n  public int getNumConnections() {\n    // however we determine the current number of connections on the server\n  }\n}\n
    "},{"location":"spectator/lang/java/usage/#netflix-integration","title":"Netflix Integration","text":"

    When running at Netflix, use the atlas-client library to enable transferring the instrumented data to Atlas. See the appropriate section for the type of project you are working on:

    • Libraries
    • SBN Applications, specifically standalone apps using SBN.
    "},{"location":"spectator/lang/java/usage/#libraries","title":"Libraries","text":"

    For libraries, the only dependency that should be needed is:

    com.netflix.spectator:spectator-api:0.101.0\n

    The bindings to integrate internally should be included with the application. In your code, just inject a Registry, e.g.:

    public class Foo {\n  @Inject\n  public Foo(Registry registry) {\n    ...\n  }\n  ...\n}\n

    See the testing docs for more information about creating a binding to use with tests. Libraries should not install a particular registry. The bindings to use for the Registry should be determined by the application that is using the library. Think of it as being like slf4j where logging configuration is up to the end-user, not the library owner.

    You may want to avoid binding errors if the end-user has not provided a binding for the Spectator registry. For Spring, this can be done by using optional injections, for example:

    // Sample library class\npublic class MyLib {\n  Registry registry;\n\n  @Inject\n  public MyLib(Optional<Registry> registryOpt) {\n    this.registry = registryOpt.orElseGet(NoopRegistry::new);\n  }\n}\n
    "},{"location":"spectator/lang/java/usage/#sbn-applications","title":"SBN Applications","text":"

    Applications should include spring-boot-netflix-starter-metrics which will configure the registry bindings for internal use.

    "},{"location":"spectator/lang/java/ext/jvm-buffer-pools/","title":"Buffer Pools","text":"

    Buffer pools, such as direct byte buffers, can be monitored at a high level using the BufferPoolMXBean provided by the JDK.

    "},{"location":"spectator/lang/java/ext/jvm-buffer-pools/#getting-started","title":"Getting Started","text":"

    To get information about buffer pools in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-buffer-pools/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-buffer-pools/#jvmbuffercount","title":"jvm.buffer.count","text":"

    Gauge showing the current number of distinct buffers.

    Unit: count

    Dimensions:

    • id: type of buffers. Value will be either direct for direct byte buffers or mapped for memory mapped files.
    "},{"location":"spectator/lang/java/ext/jvm-buffer-pools/#jvmbuffermemoryused","title":"jvm.buffer.memoryUsed","text":"

    Gauge showing the current number of bytes used by all buffers.

    Unit: bytes

    Dimensions:

    • id: type of buffers. Value will be either direct for direct byte buffers or mapped for memory mapped files.
    "},{"location":"spectator/lang/java/ext/jvm-classloading/","title":"Class Loading","text":"

    Uses the ClassLoadingMXBean provided by the JDK to monitor the number of classes loaded and unloaded.

    "},{"location":"spectator/lang/java/ext/jvm-classloading/#getting-started","title":"Getting Started","text":"

    To get information about classloading in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-classloading/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-classloading/#jvmclassloadingclassesloaded","title":"jvm.classloading.classesLoaded","text":"

    Counter reporting the number of classes loaded.

    Unit: classes/second

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-classloading/#jvmclassloadingclassesunloaded","title":"jvm.classloading.classesUnloaded","text":"

    Counter reporting the number of classes unloaded.

    Unit: classes/second

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-compilation/","title":"Compilation","text":"

    Uses the CompilationMXBean provided by the JDK to monitor the time spent compiling code, for each compiler name.

    "},{"location":"spectator/lang/java/ext/jvm-compilation/#getting-started","title":"Getting Started","text":"

    To get information about compilation in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-compilation/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-compilation/#jvmcompilationcompilationtime","title":"jvm.compilation.compilationTime","text":"

    Counter reporting the amount of elapsed time spent in compilation. If multiple threads are used for compilation, then this value represents the summation of the time each thread spent in compilation.

    Unit: seconds/second

    Dimensions:

    • compiler: name of the just-in-time (JIT) compiler
    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/","title":"GC Causes","text":"

    The various GC causes aren't well documented. The list provided here comes from the gcCause.cpp file in the jdk and we include some information on what these mean for the application.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#systemgc__","title":"System.gc__","text":"

    Something called System.gc(). If you are seeing this once an hour it is likely related to the RMI GC interval. For more details see:

    • Unexplained System.gc() calls due to Remote Method Invocation (RMI) or explicit garbage collections
    • sun.rmi.dgc.client.gcInterval
    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#fullgcalot","title":"FullGCAlot","text":"

    Most likely you'll never see this value. In debug builds of the jdk there is an option, -XX:+FullGCALot, that will trigger a full GC at a regular interval for testing purposes.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#scavengealot","title":"ScavengeAlot","text":"

    Most likely you'll never see this value. In debug builds of the jdk there is an option, -XX:+ScavengeALot, that will trigger a minor GC at a regular interval for testing purposes.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#allocation_profiler","title":"Allocation_Profiler","text":"

    Prior to java 8 you would see this if running with the -Xaprof setting. It would be triggered just before the jvm exits. The -Xaprof option was removed in java 8.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#jvmtienv_forcegarbagecollection","title":"JvmtiEnv_ForceGarbageCollection","text":"

    Something called the JVM tool interface function ForceGarbageCollection. Look at the -agentlib param to java to see what agents are configured.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#gclocker_initiated_gc","title":"GCLocker_Initiated_GC","text":"

    The GC locker prevents GC from occurring when JNI code is in a critical region. If GC is needed while a thread is in a critical region, then it will allow them to complete, i.e. call the corresponding release function. Other threads will not be permitted to enter a critical region. Once all threads are out of critical regions a GC event will be triggered.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#heap_inspection_initiated_gc","title":"Heap_Inspection_Initiated_GC","text":"

    GC was initiated by an inspection operation on the heap. For example you can trigger this with jmap:

    $ jmap -histo:live <pid>

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#heap_dump_initiated_gc","title":"Heap_Dump_Initiated_GC","text":"

    GC was initiated before dumping the heap. For example you can trigger this with jmap:

    $ jmap -dump:live,format=b,file=heap.out <pid>

    Another common example would be clicking the Heap Dump button on the Monitor tab in VisualVM.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#whitebox_initiated_young_gc","title":"WhiteBox_Initiated_Young_GC","text":"

    Most likely you'll never see this value. Used for testing hotspot, it indicates something called sun.hotspot.WhiteBox.youngGC().

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#no_gc","title":"No_GC","text":"

    Used for CMS to indicate concurrent phases.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#allocation_failure","title":"Allocation_Failure","text":"

    Usually this means that there is an allocation request that is bigger than the available space in young generation and will typically be associated with a minor GC. For G1 this will likely be a major GC and it is more common to see G1_Evacuation_Pause for routine minor collections.

    On linux the jvm will trigger a GC if the kernel indicates there isn't much memory left via mem_notify.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#tenured_generation_full","title":"Tenured_Generation_Full","text":"

    Not used?

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#permanent_generation_full","title":"Permanent_Generation_Full","text":"

    Triggered as a result of an allocation failure in PermGen. Pre java 8.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#metadata_gc_threshold","title":"Metadata_GC_Threshold","text":"

    Triggered as a result of an allocation failure in Metaspace. Metaspace, which replaced PermGen, was added in java 8.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#cms_generation_full","title":"CMS_Generation_Full","text":"

    Not used?

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#cms_initial_mark","title":"CMS_Initial_Mark","text":"

    Initial mark phase of CMS, for more details see Phases of CMS. Unfortunately it doesn't appear to be reported via the mbeans and we just get No_GC.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#cms_final_remark","title":"CMS_Final_Remark","text":"

    Remark phase of CMS, for more details see Phases of CMS. Unfortunately it doesn't appear to be reported via the mbeans and we just get No_GC.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#cms_concurrent_mark","title":"CMS_Concurrent_Mark","text":"

    Concurrent mark phase of CMS, for more details see Phases of CMS. Unfortunately it doesn't appear to be reported via the mbeans and we just get No_GC.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#old_generation_expanded_on_last_scavenge","title":"Old_Generation_Expanded_On_Last_Scavenge","text":"

    Not used?

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#old_generation_too_full_to_scavenge","title":"Old_Generation_Too_Full_To_Scavenge","text":"

    Not used?

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#ergonomics","title":"Ergonomics","text":"

    This indicates you are using the adaptive size policy, -XX:+UseAdaptiveSizePolicy and is on by default for recent versions, with the parallel collector (-XX:+UseParallelGC). For more details see The Why of GC Ergonomics.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#g1_evacuation_pause","title":"G1_Evacuation_Pause","text":"

    An evacuation pause is the most common young gen cause for G1 and indicates that it is copying live objects from one set of regions, young and sometimes young + old, to another set of regions. For more details see Understanding G1 GC Logs.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#g1_humongous_allocation","title":"G1_Humongous_Allocation","text":"

    A humongous allocation is one where the size is greater than 50% of the G1 region size. Before a humongous allocation the jvm checks if it should do a routine evacuation pause without regard to the actual allocation size, but if triggered due to this check the cause will be listed as humongous allocation. This cause is also used for any collections used to free up enough space for the allocation.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#last_ditch_collection","title":"Last_ditch_collection","text":"

    For perm gen (java 7 or earlier) and metaspace (java 8+) a last ditch collection will be triggered if an allocation fails and the memory pool cannot be expanded.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#illegal_value_-last_gc_cause-_illegal_value","title":"ILLEGAL_VALUE_-last_gc_cause-_ILLEGAL_VALUE","text":"

    Included for completeness, but you should never see this value.

    "},{"location":"spectator/lang/java/ext/jvm-gc-causes/#unknown_gccause","title":"unknown_GCCause","text":"

    Included for completeness, but you should never see this value.

    "},{"location":"spectator/lang/java/ext/jvm-gc/","title":"Garbage Collection","text":"

    The GC module registers with the notification emitter of the GarbageCollectorMXBean to provide some basic GC logging and metrics.

    • Getting started
    • Logging
    • Metrics
    • Alerting
    "},{"location":"spectator/lang/java/ext/jvm-gc/#getting-started","title":"Getting Started","text":"

    For using it internally at Netflix, see the Java Usage guide, otherwise keep reading this section.

    "},{"location":"spectator/lang/java/ext/jvm-gc/#requirements","title":"Requirements","text":"

    This library relies on the notification emitter added in 7u4, but there are known issues prior to 7u40. There is also a regression impacting Java 9 and higher, see #502 and JDK-8196325 for more information. For G1, it is recommended to be on the latest version available.

    "},{"location":"spectator/lang/java/ext/jvm-gc/#dependencies","title":"Dependencies","text":"
    com.netflix.spectator:spectator-ext-gc:0.101.0\n
    "},{"location":"spectator/lang/java/ext/jvm-gc/#start-reporting","title":"Start Reporting","text":"

    Then in the initialization for the application:

    import com.netflix.spectator.gc.GcLogger;\n...\n// Keep a single instance of the logger\nGcLogger gc = new GcLogger();\ngc.start(null);\n
    "},{"location":"spectator/lang/java/ext/jvm-gc/#logging","title":"Logging","text":"

    After GC events, a DEBUG level log message will get reported using slf4j. This makes it easy to see GC events in the context of other log messages for the application. The logger name is com.netflix.spectator.gc.GcLogger and the message will look like:

    ${GC_TYPE}: ${COLLECTOR_NAME}, id=${N}, at=${START_TIME}, duration=${T}ms,\ncause=[${CAUSE}], ${TOTAL_USAGE_BEFORE} => ${TOTAL_USAGE_AFTER} / ${MAX_SIZE}\n(${PERCENT_USAGE_BEFORE} => ${PERCENT_USAGE_AFTER})\n

    The id can be used to verify events were not skipped or correlate with other sources like detailed GC logs. See GC causes for more details on the possible causes.

    Sample:

    2014-08-31 02:02:24,724  DEBUG [com.netflix.spectator.gc.GcLogger] YOUNG: ParNew,\nid=5281, at=Sun Aug 31 02:02:24 UTC 2014, duration=2ms, cause=[Allocation Failure],\n0.4G => 0.3G / 1.8G (24.3% => 16.6%)\n
    "},{"location":"spectator/lang/java/ext/jvm-gc/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgcallocationrate","title":"jvm.gc.allocationRate","text":"

    The allocation rate measures how fast the application is allocating memory. It is a counter that is incremented after a GC event by the amount youngGen.sizeBeforeGC.

    Technically, right now it is:

    youngGen.sizeBeforeGC - youngGen.sizeAfterGC\n

    However, youngGen.sizeAfterGC should be 0 and thus the size of young gen before the GC is the amount allocated since the previous GC event.

    Unit: bytes/second

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgcpromotionrate","title":"jvm.gc.promotionRate","text":"

    The promotion rate measures how fast data is being moved from young generation into the old generation.

    It is a counter that is incremented after a GC event by the amount:

    abs(oldGen.sizeAfterGC - oldGen.sizeBeforeGC)\n

    Unit: bytes/second

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgclivedatasize","title":"jvm.gc.liveDataSize","text":"

    The live data size is the size of the old generation after a major GC.

    The image below shows how the live data size view compares to a metric showing the current size of the memory pool:

    Unit: bytes

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgcmaxdatasize","title":"jvm.gc.maxDataSize","text":"

    Maximum size for the old generation. Primary use-case is for gaining perspective on the live data size.

    Unit: bytes

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgcpause","title":"jvm.gc.pause","text":"

    Pause time for a GC event. All of the values reported are stop the world pauses.

    Unit: seconds

    Dimensions:

    • action: action performed by the garbage collector (getGcAction). There is no guarantee, but the typical values seen are end_of_major_GC and end_of_minor_GC.
    • cause: cause that instigated GC (getGcCause). For an explanation of common causes see the GC causes page.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#jvmgcconcurrentphasetime","title":"jvm.gc.concurrentPhaseTime","text":"

    Time spent in concurrent phases of CMS pauses.

    Unit: seconds

    Dimensions:

    • action: action performed by the garbage collector (getGcAction). There is no guarantee, but the typical values seen are end_of_major_GC and end_of_minor_GC.
    • cause: cause that instigated GC (getGcCause). For an explanation of common causes see the GC causes page.
    "},{"location":"spectator/lang/java/ext/jvm-gc/#alerting","title":"Alerting","text":"

    This section assumes the data is available in Atlas, but users of other systems should be able to take the idea and make it work. For all of these alerts it is recommended to check them per instance. At Netflix that can be done by selecting the option in the alert UI:

    "},{"location":"spectator/lang/java/ext/jvm-gc/#max-pause-time","title":"Max Pause Time","text":"

    Example to trigger an alert if the pause time exceeds 500 milliseconds:

    name,jvm.gc.pause,:eq,\nstatistic,max,:eq,\n:and,\n:max,(,cause,),:by,\n0.5,:gt,\n$cause,:legend\n
    "},{"location":"spectator/lang/java/ext/jvm-gc/#heap-pressure","title":"Heap Pressure","text":"

    Example to trigger an alert if the live data size is over 70% of the heap:

    name,jvm.gc.liveDataSize,:eq,:max,\nname,jvm.gc.maxDataSize,:eq,:max,\n:div,100,:mul,\n70,:gt,\npercentUsed,:legend\n
    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/","title":"Memory Pools","text":"

    Uses the MemoryPoolMXBean provided by the JDK to monitor the sizes of java memory spaces such as perm gen, eden, old gen, etc.

    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/#getting-started","title":"Getting Started","text":"

    To get information about memory pools in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-memory-pools/#jvmmemoryused","title":"jvm.memory.used","text":"

    Gauge reporting the current amount of memory used. For the young and old gen pools this metric will typically have a sawtooth pattern. For alerting or detecting memory pressure the live data size is probably a better option.

    Unit: bytes

    Dimensions:

    • See metric dimensions.
    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/#jvmmemorycommitted","title":"jvm.memory.committed","text":"

    Gauge reporting the current amount of memory committed. From the javadocs, committed is:

    The amount of memory (in bytes) that is guaranteed to be available for use by the Java virtual machine. The amount of committed memory may change over time (increase or decrease). The Java virtual machine may release memory to the system and committed could be less than init. committed will always be greater than or equal to used.

    Unit: bytes

    Dimensions:

    • See metric dimensions.
    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/#jvmmemorymax","title":"jvm.memory.max","text":"

    Gauge reporting the max amount of memory that can be used. From the javadocs, max is:

    The maximum amount of memory (in bytes) that can be used for memory management. Its value may be undefined. The maximum amount of memory may change over time if defined. The amount of used and committed memory will always be less than or equal to max if max is defined. A memory allocation may fail if it attempts to increase the used memory such that used > committed even if used <= max would still be true (for example, when the system is low on virtual memory).

    Unit: bytes

    Dimensions:

    • See metric dimensions.
    "},{"location":"spectator/lang/java/ext/jvm-memory-pools/#metric-dimensions","title":"Metric Dimensions","text":"

    All memory metrics have the following dimensions:

    • id: name of the memory pool being reported. The names of the pools vary depending on the garbage collector algorithm being used.
    • memtype: type of memory. It has two possible values: HEAP and NON_HEAP. For more information see the javadocs for MemoryType.
    "},{"location":"spectator/lang/java/ext/jvm-safepoint/","title":"Safepoint","text":"

    Uses the Hotspot mbean to access the time spent in and getting to safepoints.

    "},{"location":"spectator/lang/java/ext/jvm-safepoint/#getting-started","title":"Getting Started","text":"

    To get information about compilation in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-safepoint/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-safepoint/#jvmhotspotsafepointtime","title":"jvm.hotspot.safepointTime","text":"

    Timer reporting the amount of time the application has been stopped for safepoint operations.

    Unit: seconds

    "},{"location":"spectator/lang/java/ext/jvm-safepoint/#jvmhotspotsafepointsynctime","title":"jvm.hotspot.safepointSyncTime","text":"

    Timer reporting the amount of time spent getting to safepoints.

    Unit: seconds

    "},{"location":"spectator/lang/java/ext/jvm-threads/","title":"Threads","text":"

    Uses the ThreadMXBean provided by the JDK to monitor the number of active threads and threads started.

    "},{"location":"spectator/lang/java/ext/jvm-threads/#getting-started","title":"Getting Started","text":"

    To get information about threads in Spectator, just setup registration of standard MXBeans. Note, if you are building an app at Netflix, then this should happen automatically via the normal platform initialization.

    import com.netflix.spectator.jvm.Jmx;\n\nJmx.registerStandardMXBeans(registry);\n
    "},{"location":"spectator/lang/java/ext/jvm-threads/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/jvm-threads/#jvmthreadthreadcount","title":"jvm.thread.threadCount","text":"

    Gauge reporting the number of active threads.

    Unit: threads

    Dimensions:

    • id: thread category, either daemon or non-daemon
    "},{"location":"spectator/lang/java/ext/jvm-threads/#jvmthreadthreadsstarted","title":"jvm.thread.threadsStarted","text":"

    Counter reporting the number of threads started.

    Unit: threads/second

    Dimensions:

    • None.
    "},{"location":"spectator/lang/java/ext/log4j1/","title":"Log4j1 Appender","text":"

    Custom appender for log4j1 to track the number of log messages reported.

    Note

    Log4j 1.x has reached end of life and is no longer supported by Apache. This extension is provided for some users that have difficulty moving to a supported version of log4j.

    "},{"location":"spectator/lang/java/ext/log4j1/#getting-started","title":"Getting Started","text":"

    To use it simply add a dependency:

    com.netflix.spectator:spectator-ext-log4j1:0.101.0\n

    Then in your log4j configuration specify the com.netflix.spectator.log4j.SpectatorAppender. In a properties file it would look something like:

    log4j.rootLogger=ALL, A1\nlog4j.appender.A1=com.netflix.spectator.log4j.SpectatorAppender\n
    "},{"location":"spectator/lang/java/ext/log4j1/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/log4j1/#log4jnummessages","title":"log4j.numMessages","text":"

    Counters showing the number of messages that have been passed to the appender.

    Unit: messages/second

    Dimensions:

    • loglevel: standard log level of the events.
    "},{"location":"spectator/lang/java/ext/log4j1/#log4jnumstacktraces","title":"log4j.numStackTraces","text":"

    Counter for the number of messages with stack traces written to the logs.

    Unit: messages/second

    Dimensions:

    • loglevel: standard log level of the events.
    • exception: simple class name for the exception that was thrown.
    • file: file name for where the exception was thrown.
    "},{"location":"spectator/lang/java/ext/log4j2/","title":"Log4j2 Appender","text":"

    Custom appender for log4j2 to track the number of log messages reported.

    "},{"location":"spectator/lang/java/ext/log4j2/#getting-started","title":"Getting Started","text":"

    To use it simply add a dependency:

    com.netflix.spectator:spectator-ext-log4j2:0.101.0\n

    Then in your application initialization:

    Registry registry = ...\nSpectatorAppender.addToRootLogger(\n    registry,             // Registry to use\n    \"spectator\",          // Name for the appender\n    false);               // Should stack traces be ignored?\n

    This will add the appender to the root logger and register a listener so it will get re-added if the configuration changes. You can also use the appender by specifying it in the log4j2 configuration, but this will cause some of the loggers in Spectator to get created before log4j is properly initialized and result in some lost log messages. With that caveat in mind, if you need the additional flexibility of using the configuration then specify the Spectator appender:

    <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration monitorInterval=\"5\" status=\"warn\">\n  <Appenders>\n    <Spectator name=\"root\"/>\n  </Appenders>\n  <Loggers>\n    <Root level=\"debug\">\n      <AppenderRef ref=\"root\"/>\n    </Root>\n  </Loggers>\n</Configuration>\n
    "},{"location":"spectator/lang/java/ext/log4j2/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/log4j2/#log4jnummessages","title":"log4j.numMessages","text":"

    Counters showing the number of messages that have been passed to the appender.

    Unit: messages/second

    Dimensions:

    • appender: name of the spectator appender.
    • loglevel: standard log level of the events.
    "},{"location":"spectator/lang/java/ext/log4j2/#log4jnumstacktraces","title":"log4j.numStackTraces","text":"

    Counter for the number of messages with stack traces written to the logs. This will only be collected if the ignoreExceptions flag is set to false for the appender.

    Unit: messages/second

    Dimensions:

    • appender: name of the spectator appender.
    • loglevel: standard log level of the events.
    • exception: simple class name for the exception that was thrown.
    • file: file name for where the exception was thrown.
    "},{"location":"spectator/lang/java/ext/placeholders/","title":"Placeholders","text":"

    The placeholders extension allows for identifiers to be created with dimensions that will get filled in based on the context when an activity occurs. The primary use-cases are to support:

    1. Optional dimensions that can be conditionally enabled.
    2. Pulling dimensions from another context such as a thread local store. This can make it easier to share them across various parts of the code.
    "},{"location":"spectator/lang/java/ext/placeholders/#dependencies","title":"Dependencies","text":"

    To use the placeholders support add a dependency on:

    com.netflix.spectator:spectator-ext-placeholders:0.101.0\n
    "},{"location":"spectator/lang/java/ext/placeholders/#usage","title":"Usage","text":"

    Placeholder support is available for activity based types including counters, timers, and distribution summaries. To get started create a PlaceholderFactory from the registry:

    PlaceholderFactory factory = PlaceholderFactory.from(registry);\n

    Then use the factory to create an identifier using a TagFactory to dynamically fetch the value for a given dimension when some activity occurs. Suppose we want to use a dynamic configuration library such as Archaius to conditionally enable a dimension with high cardinality:

    public class Server {\n\n  private final Context context;\n  private final Counter rps;\n\n  public Server(Context context, PropertyFactory props, Registry registry) {\n    this.context = context;\n\n    // Property that can be dynamically updated to indicate whether or not\n    // detailed dimensions should be added to metrics.\n    Property<Boolean> enabled = props\n      .getProperty(\"server.detailedMetricsEnabled\")\n      .asBoolean(false);\n\n    // Factory for creating instances of the counter using placeholders\n    PlaceholderFactory factory = PlaceholderFactory.from(registry);\n\n    // Create the underlying id with 4 possible dimensions:\n    // *  method and status - low cardinality and always added if available\n    //    in the context.\n    // *  geo and device - high cardinality and only available if the property\n    //    to enable detailed metrics is set to true.\n    PlaceholderId rpsId = factory.createId(\"server.requests\")\n      .withTagFactory(TagFactory.from(\"method\", context::getMethod))\n      .withTagFactory(TagFactory.from(\"status\", context::getStatus))\n      .withTagFactory(new DetailedDimension(\"geo\", enabled, context::getGeo))\n      .withTagFactory(new DetailedDimension(\"device\", enabled, context::getDevice));\n    rps = factory.counter(rpsId);\n  }\n\n  public Response handle(Request request) {\n    fillInContext(request);\n    Response response = process(request);\n    fillInContext(response);\n\n    // Update the counter, the placeholders will be resolved when the activity, in\n    // this case the increment is called.\n    rps.increment();\n    return response;\n  }\n\n  // Tag factory that can be controlled with an enabled property.\n  private static class DetailedDimension implements TagFactory {\n\n    private final String name;\n    private final Supplier<String> valueFunc;\n\n    DetailedDimension(String name, Property<Boolean> enabled, Supplier<String> valueFunc) {\n      this.name = name;\n      
this.enabled = enabled;\n      this.valueFunc = valueFunc;\n    }\n\n    @Override public String name() {\n      return name;\n    }\n\n    @Override public Tag createTag() {\n      return enabled.get()\n          ? new BasicTag(name, valueFunc.get())\n          : null;\n    }\n  }\n}\n
    "},{"location":"spectator/lang/java/ext/thread-pools/","title":"Thread Pools","text":"

    Java's ThreadPoolExecutor exposes several properties that are useful to monitor to assess the health, performance, and configuration of the pool.

    "},{"location":"spectator/lang/java/ext/thread-pools/#getting-started","title":"Getting Started","text":"

    To report thread pool metrics, one can attach a ThreadPoolMonitor in the following manner:

    import com.netflix.spectator.api.patterns.ThreadPoolMonitor;\n\nThreadPoolMonitor.attach(registry, myThreadPoolExecutor, \"my-thread-pool\");\n

    The thread pool's properties will be polled regularly in the background and will report metrics to the provided registry. The third parameter will be added to each metric as an id dimension, if provided. However, if the value is null or an empty string, then a default will be used as the id.

    "},{"location":"spectator/lang/java/ext/thread-pools/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/java/ext/thread-pools/#threadpooltaskcount","title":"threadpool.taskCount","text":"

    Counter of the total number of tasks that have been scheduled.

    Unit: tasks/second

    Data Source: ThreadPoolExecutor#getTaskCount()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolcompletedtaskcount","title":"threadpool.completedTaskCount","text":"

    Counter of the total number of tasks that have completed.

    Unit: tasks/second

    Data Source: ThreadPoolExecutor#getCompletedTaskCount()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolcurrentthreadsbusy","title":"threadpool.currentThreadsBusy","text":"

    Gauge showing the current number of threads actively doing work.

    Unit: count

    Data Source: ThreadPoolExecutor#getActiveCount()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolmaxthreads","title":"threadpool.maxThreads","text":"

    Gauge showing the current maximum number of threads configured for the pool.

    Unit: count

    Data Source: ThreadPoolExecutor#getMaximumPoolSize()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolpoolsize","title":"threadpool.poolSize","text":"

    Gauge showing the current size of the pool.

    Unit: count

    Data Source: ThreadPoolExecutor#getPoolSize()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolcorepoolsize","title":"threadpool.corePoolSize","text":"

    Gauge showing the current maximum number of core threads configured for the pool.

    Unit: count

    Data Source: ThreadPoolExecutor#getCorePoolSize()

    "},{"location":"spectator/lang/java/ext/thread-pools/#threadpoolqueuesize","title":"threadpool.queueSize","text":"

    Gauge showing the current number of tasks queued for execution.

    Unit: count

    Data Source: ThreadPoolExecutor#getQueue().size()

    "},{"location":"spectator/lang/java/meters/counter/","title":"Java Counters","text":"

    Counters are created using the Registry, which will be setup as part of application initialization. For example:

    public class Queue {\n\n  private final Counter insertCounter;\n  private final Counter removeCounter;\n  private final QueueImpl impl;\n\n  @Inject\n  public Queue(Registry registry) {\n    insertCounter = registry.counter(\"queue.insert\");\n    removeCounter = registry.counter(\"queue.remove\");\n    impl = new QueueImpl();\n  }\n

    Then call increment when an event occurs:

      public void insert(Object obj) {\n    insertCounter.increment();\n    impl.insert(obj);\n  }\n\n  public Object remove() {\n    if (impl.nonEmpty()) {\n      removeCounter.increment();\n      return impl.remove();\n    } else {\n      return null;\n    }\n  }\n

    Optionally, an amount can be passed in when calling increment. This is useful when a collection of events happen together.

      public void insertAll(Collection<Object> objs) {\n    insertCounter.increment(objs.size());\n    impl.insertAll(objs);\n  }\n}\n
    "},{"location":"spectator/lang/java/meters/dist-summary/","title":"Java Distribution Summaries","text":"

    Distribution Summaries are created using the Registry, which will be setup as part of application initialization. For example:

    public class Server {\n\n  private final DistributionSummary requestSize;\n\n  @Inject\n  public Server(Registry registry) {\n    requestSize = registry.distributionSummary(\"server.requestSize\");\n  }\n

    Then call record when an event occurs:

      public Response handle(Request request) {\n    requestSize.record(request.sizeInBytes());\n  }\n}\n
    "},{"location":"spectator/lang/java/meters/gauge/","title":"Java Gauges","text":""},{"location":"spectator/lang/java/meters/gauge/#polled-gauges","title":"Polled Gauges","text":"

    The most common use of Gauges is by registering a hook with Spectator, so that it will poll the values in the background. This is done by using the PolledMeter helper class.

    A Polled Gauge is registered by passing in an id, a reference to the object, and a function to get or compute a numeric value based on the object. Note that a Gauge should only be registered once, not on each update. Consider this example of a web server tracking the number of connections:

    class HttpServer {\n  // Tracks the number of current connections to the server\n  private AtomicInteger numConnections;\n\n  public HttpServer(Registry registry) {\n    numConnections = PolledMeter.using(registry)\n      .withName(\"server.numConnections\")\n      .monitorValue(new AtomicInteger(0));\n  }\n\n  public void onConnectionCreated() {\n    numConnections.incrementAndGet();\n    ...\n  }\n\n  public void onConnectionClosed() {\n    numConnections.decrementAndGet();\n    ...\n  }\n\n  ...\n}\n

    The Spectator Registry will keep a weak reference to the object. If the object is garbage collected, then it will automatically drop the registration. In the example above, the Registry will have a weak reference to numConnections and the server instance will have a strong reference to numConnections. If the server instance goes away, then the Gauge will as well.

    When multiple Gauges are registered with the same id, the reported value will be the sum of the matches. For example, if multiple instances of the HttpServer class were created on different ports, then the value server.numConnections would be the total number of connections across all server instances. If a different behavior is desired, then ensure your usage does not perform multiple registrations.

    There are several different ways to register a Gauge:

    "},{"location":"spectator/lang/java/meters/gauge/#using-number","title":"Using Number","text":"

    A Gauge can also be created based on an implementation of Number. Note the Number implementation should be thread-safe. For example:

    AtomicInteger size = new AtomicInteger();\nPolledMeter.using(registry)\n  .withName(\"queue.size\")\n  .monitorValue(size);\n

    The call will return the Number so the registration can be inline on the assignment:

    AtomicInteger size = PolledMeter.using(registry)\n  .withName(\"queue.size\")\n  .monitorValue(new AtomicInteger());\n

    Updates to the value are performed by updating the Number instance directly.

    "},{"location":"spectator/lang/java/meters/gauge/#using-lambda","title":"Using Lambda","text":"

    Specify a lambda that takes the object as parameter.

    public class Queue {\n\n  @Inject\n  public Queue(Registry registry) {\n    PolledMeter.using(registry)\n      .withName(\"queue.size\")\n      .monitorValue(this, Queue::size);\n  }\n\n  ...\n}\n

    Warning

    Be careful to avoid creating a reference to the object in the lambda. It will prevent garbage collection and can lead to a memory leak in the application. For example, by calling size without using the passed in object there will be a reference to this:

    PolledMeter.using(registry)\n  .withName(\"queue.size\")\n  .monitorValue(this, obj -> size());\n
    "},{"location":"spectator/lang/java/meters/gauge/#collection-sizes","title":"Collection Sizes","text":"

    For classes that implement Collection or Map, there are helpers:

    Queue queue = new LinkedBlockingQueue();\nPolledMeter.using(registry)\n  .withName(\"queue.size\")\n  .monitorSize(queue);\n\nMap<String, String> cache = new ConcurrentMap<>();\nPolledMeter.using(registry)\n  .withName(\"cache.size\")\n  .monitorSize(cache);\n
    "},{"location":"spectator/lang/java/meters/gauge/#monotonic-counters","title":"Monotonic Counters","text":"

    A common technique used by some libraries is to expose a monotonically increasing counter that represents the number of events since the system was initialized. An example of that in the JDK is ThreadPoolExecutor.getCompletedTaskCount, which returns the number of completed tasks on the thread pool.

    For sources like this, the monitorMonotonicCounter method can be used:

    // For an implementation of Number\nLongAdder tasks = new LongAdder();\nPolledMeter.using(registry)\n  .withName(\"pool.completedTasks\")\n  .monitorMonotonicCounter(tasks);\n\n// Or using a lambda\nThreadPoolExecutor executor = ...\nPolledMeter.using(registry)\n  .withName(\"pool.completedTasks\")\n  .monitorMonotonicCounter(executor, ThreadPoolExecutor::getCompletedTaskCount);\n

    For thread pools specifically, there are better options for getting standard metrics. See the docs for the Thread Pools extension for more information.

    "},{"location":"spectator/lang/java/meters/gauge/#active-gauges","title":"Active Gauges","text":"

    Gauges can also be set directly by the user. In this mode, the user is responsible for regularly updating the value of the Gauge by calling set. Looking at the HttpServer example, with an active gauge, it would look like:

    class HttpServer {\n  // Tracks the number of current connections to the server\n  private AtomicInteger numConnections;\n  private Gauge gauge;\n\n  public HttpServer(Registry registry) {\n    numConnections = new AtomicInteger();\n    gauge = registry.gauge(\"server.numConnections\");\n    gauge.set(numConnections.get());\n  }\n\n  public void onConnectionCreated() {\n    numConnections.incrementAndGet();\n    gauge.set(numConnections.get());\n    ...\n  }\n\n  public void onConnectionClosed() {\n    numConnections.decrementAndGet();\n    gauge.set(numConnections.get());\n    ...\n  }\n\n  ...\n}\n
    "},{"location":"spectator/lang/java/meters/percentile-timer/","title":"Java Percentile Timers","text":"

    Note: Percentile timers generate a metric per bucket in the histogram. Create instances once per ID and reuse them as needed. Avoid adding tags with high cardinality as that increases the cardinality of the metric. If at all possible, use a Timer instead.

    To get started, create an instance using the Registry:

    public class Server {\n\n  private final Registry registry;\n  private final PercentileTimer requestLatency;\n\n  @Inject\n  public Server(Registry registry) {\n    this.registry = registry;\n    requestLatency = PercentileTimer.builder(registry)\n        .withId(registry.createId(\"server.request.latency\", \"status\", \"200\"))\n        .build();\n

    Then wrap the call you need to measure, preferably using a lambda:

      public Response handle(Request request) {\n    return requestLatency.recordRunnable(() -> handleImpl(request));\n  }\n

    The lambda variants will handle exceptions for you and ensure the record happens as part of a finally block using the monotonic time. It could also have been done more explicitly like:

      public Response handle(Request request) {\n    final long start = registry.clock().monotonicTime();\n    try {\n      return handleImpl(request);\n    } finally {\n      final long end = registry.clock().monotonicTime();\n      requestLatency.record(end - start, TimeUnit.NANOSECONDS);\n    }\n  }\n

    This example uses the Clock from the Registry, which can be useful for testing, if you need to control the timing. In actual usage, it will typically get mapped to the system clock. It is recommended to use a monotonically increasing source for measuring the times, to avoid occasionally having bogus measurements due to time adjustments. For more information, see the Clock documentation.

    "},{"location":"spectator/lang/java/meters/timer/","title":"Java Timers","text":""},{"location":"spectator/lang/java/meters/timer/#timer","title":"Timer","text":"

    To get started, create an instance using the Registry:

    public class Server {\n\n  private final Registry registry;\n  private final Timer requestLatency;\n\n  @Inject\n  public Server(Registry registry) {\n    this.registry = registry;\n    requestLatency = registry.timer(\"server.requestLatency\");\n  }\n

    Then wrap the call you need to measure, preferably using a lambda:

      public Response handle(Request request) {\n    return requestLatency.recordRunnable(() -> handleImpl(request));\n  }\n

    The lambda variants will handle exceptions for you and ensure the record happens as part of a finally block using the monotonic time. It could also have been done more explicitly like:

      public Response handle(Request request) {\n    final long start = registry.clock().monotonicTime();\n    try {\n      return handleImpl(request);\n    } finally {\n      final long end = registry.clock().monotonicTime();\n      requestLatency.record(end - start, TimeUnit.NANOSECONDS);\n    }\n  }\n

    This example uses the Clock from the Registry, which can be useful for testing, if you need to control the timing. In actual usage, it will typically get mapped to the system clock. It is recommended to use a monotonically increasing source for measuring the times, to avoid occasionally having bogus measurements due to time adjustments. For more information, see the Clock documentation.

    "},{"location":"spectator/lang/java/meters/timer/#longtasktimer","title":"LongTaskTimer","text":"

    To get started, create an instance using the Registry:

    import com.netflix.spectator.api.patterns.LongTaskTimer;\n\npublic class MetadataService {\n\n  private final LongTaskTimer metadataRefresh;\n\n  @Inject\n  public MetadataService(Registry registry) {\n    metadataRefresh = LongTaskTimer.get(\n        registry, registry.createId(\"metadata.refreshDuration\"));\n    // setup background thread to call refresh()\n  }\n\n  private void refresh() {\n    final int id = metadataRefresh.start();\n    try {\n      refreshImpl();\n    } finally {\n      metadataRefresh.stop(id);\n    }\n  }\n

    The id value returned by the start method is used to keep track of a particular task being measured by the LongTaskTimer. It must be stopped using the provided id. Note that unlike a regular Timer that does not do anything until the final duration is recorded, a LongTaskTimer will report as two Gauges:

    • duration: total duration spent within all currently running tasks.
    • activeTasks: number of currently running tasks.

    This means that you can see what is happening while the task is running, but you need to keep in mind:

    • The meter id is fixed before the task begins. There is no way to change tags based on the run, e.g., update a different Timer, if an exception is thrown.
    • Being a Gauge, it is inappropriate for short tasks. In particular, Gauges are sampled and if it is not sampled during the execution, or the sampling period is a significant subset of the expected duration, then the duration value will not be meaningful.
    "},{"location":"spectator/lang/java/registry/metrics3/","title":"Metrics3 Registry","text":"

    Registry that uses metrics3 as the underlying implementation. To use the metrics registry, add a dependency on the spectator-reg-metrics3 library. For gradle:

    com.netflix.spectator:spectator-reg-metrics3:0.101.0\n

    Then when initializing the application, use the MetricsRegistry. For more information see the metrics3 example.

    "},{"location":"spectator/lang/java/registry/overview/","title":"Registry","text":"

    The Registry is the main class for managing a set of meters. A Meter is a class for collecting a set of measurements about your application.

    "},{"location":"spectator/lang/java/registry/overview/#choose-implementation","title":"Choose Implementation","text":"

    The core Spectator library, spectator-api, comes with the following Registry implementations:

    Class Dependency Description DefaultRegistry spectator-api Updates local counters, frequently used with unit tests. NoopRegistry spectator-api Does nothing, tries to make operations as cheap as possible. This implementation is typically used to help understand the overhead being created due to instrumentation. It can also be useful in testing to help ensure that no side effects were introduced where the instrumentation is now needed in order for the application to function properly. MetricsRegistry spectator-reg-metrics3 Map to metrics3 library. This implementation is typically used for reporting to local files, JMX, or other backends like Graphite. Note that it uses a hierarchical naming scheme rather than the dimensional naming used by Spectator, so the names will get flattened when mapped to this Registry.

    It is recommended for libraries to write code against the Registry interface and allow the implementation to get injected by the user of the library. The simplest way is to accept the Registry via the constructor, for example:

    public class HttpServer {\n  public HttpServer(Registry registry) {\n    // use registry to collect measurements\n  }\n}\n

    The user of the class can then provide the implementation:

    Registry registry = new DefaultRegistry();\nHttpServer server = new HttpServer(registry);\n

    More complete examples can be found on the testing page or in the spectator-examples repo.

    "},{"location":"spectator/lang/java/registry/overview/#working-with-ids","title":"Working with Ids","text":"

    Spectator is primarily intended for collecting data for dimensional time series backends like Atlas. The ids used for looking up a Meter in the Registry consist of a name and set of tags. Ids will be consumed many times by users after the data has been reported, so they should be chosen with some care and thought about how they will get used. See the conventions page for some general guidelines.

    Ids are created via the Registry, for example:

    Id id = registry.createId(\"server.requestCount\");\n

    The ids are immutable, so they can be freely passed around and used in a concurrent context. Tags can be added when an id is created:

    Id id = registry.createId(\n    \"server.requestCount\",\n    \"status\", \"2xx\",\n    \"method\", \"GET\"\n);\n

    Or by using withTag and withTags on an existing id:

    public class HttpServer {\n  private final Id baseId;\n\n  public HttpServer(Registry registry) {\n    baseId = registry.createId(\"server.requestCount\");\n  }\n\n  private void handleRequestComplete(HttpRequest req, HttpResponse res) {\n    // Remember Id is immutable, withTags will return a copy with the\n    // the additional metadata\n    Id reqId = baseId.withTags(\n      \"status\", res.getStatus(),\n      \"method\", req.getMethod().name());\n    registry.counter(reqId).increment();\n  }\n\n  private void handleRequestError(HttpRequest req, Throwable t) {\n    // Can also be added individually using `withTag`. However, it is better\n    // for performance to batch modifications using `withTags`.\n    Id reqId = baseId\n      .withTag(\"error\",  t.getClass().getSimpleName())\n      .withTag(\"method\", req.getMethod().name());\n    registry.counter(reqId).increment();\n  }\n}\n
    "},{"location":"spectator/lang/java/registry/overview/#collecting-measurements","title":"Collecting Measurements","text":"

    Once you have an id, the Registry can be used to get an instance of a Meter to record a measurement. Meters can roughly be categorized in two groups:

    "},{"location":"spectator/lang/java/registry/overview/#active","title":"Active","text":"

    Active Meters are ones that are called directly when some event occurs. There are three basic types supported:

    • Counters measure how often something is occurring. This will be reported to backend systems as a rate-per-second. For example, the number of requests processed by a web server.
    • Timers measure how long something took. For example, the latency of requests processed by a web server.
    • Distribution Summaries measure the size of something. For example, the entity sizes for requests processed by a web server.
    "},{"location":"spectator/lang/java/registry/overview/#passive","title":"Passive","text":"

    Passive Meters are ones where the Registry has a reference to get the value when needed. For example, the number of current connections on a web server or the number of threads that are currently in use. These will be Gauges.

    "},{"location":"spectator/lang/java/registry/overview/#global-registry","title":"Global Registry","text":"

    There are some use-cases where injecting the Registry is not possible or is too cumbersome. The main example from the core Spectator libraries is the log4j appender. The Global Registry is useful there because logging is often initialized before any other systems and Spectator itself uses logging via the slf4j api which is quite likely being bound to log4j when the appender is being used. By using the Global Registry, the logging initialization can proceed before the Spectator initialization in the application. Though any measurements taken before a Registry instance has been added will be lost.

    The Global Registry is accessed using:

    Registry registry = Spectator.globalRegistry();\n

    By default, it will not record anything. For a specific registry instance you can choose to configure it to work with the Global Registry by calling add:

    public void init() {\n  Registry registry = // Choose an appropriate implementation\n\n  // Add it to the global registry so it will receive\n  // any activity on the global registry\n  Spectator.globalRegistry().add(registry);\n}\n

    Any measurements taken while no Registries are added to the global instance will be lost. If multiple Registries are added, all will receive updates made to the Global Registry.

    "},{"location":"spectator/lang/nodejs/usage/","title":"Usage","text":""},{"location":"spectator/lang/nodejs/usage/#project","title":"Project","text":""},{"location":"spectator/lang/nodejs/usage/#spectator-js","title":"spectator-js","text":"
    • Source
    • NPM
    • Product Lifecycle: GA
    • Module Name: nflx-spectator

    This module can be used to instrument an application using counters, distribution summaries, gauges, long task timers, timers, and more complex meter types (like Bucket or Percentile Timers) using a dimensional data model.

    The generated metrics are periodically sent to an Atlas Aggregator.

    "},{"location":"spectator/lang/nodejs/usage/#spectator-js-nodejsmetrics","title":"spectator-js-nodejsmetrics","text":"
    • Source
    • NPM
    • Product Lifecycle: GA
    • Module Name: nflx-spectator-nodejsmetrics

    Generate Node.js runtime metrics using the spectator-js Node module.

    "},{"location":"spectator/lang/nodejs/usage/#install-libraries","title":"Install Libraries","text":"

    Add the following dependencies to package.json:

    {\n  \"dependencies\": {\n    \"nflx-spectator\": \"*\",\n    \"nflx-spectator-nodejsmetrics\": \"*\"\n  }\n}\n
    "},{"location":"spectator/lang/nodejs/usage/#instrumenting-code","title":"Instrumenting Code","text":"
    'use strict';\n\nconst spectator = require('nflx-spectator');\n\n// Netflix applications can use the nflx-spectator-config node module available\n// internally through artifactory to generate the config required by nflx-spectator\nfunction getConfig() {\n  return {\n    commonTags: {'nf.node': 'i-1234'},\n    uri: 'http://atlas.example.org/v1/publish',\n    timeout: 1000 // milliseconds \n  }\n}\n\nclass Response {\n  constructor(status, size) {\n    this.status = status;\n    this.size = size;\n  }\n}\n\nclass Server {\n  constructor(registry) {\n    this.registry = registry;\n    // create a base Id, to which we'll add some dynamic tags later\n    this.requestCountId = registry.createId('server.requestCount', {version: 'v1'});\n    this.requestLatency = registry.timer('server.requestLatency');\n    this.responseSize = registry.distributionSummary('server.responseSizes');\n  }\n\n  handle(request) {\n    const start = this.registry.hrtime();\n\n    // do some work based on request and obtain a response\n    const res = new Response(200, 64);\n\n    // update the counter id with dimensions based on the request. 
The\n    // counter will then be looked up in the registry which should be \n    // fairly cheap, such as a lookup of an id object in a map\n    // However, it is more expensive than having a local variable set\n    // to the counter\n    const counterId = this.requestCountId.withTags({country: request.country, \n        status: res.status});\n    this.registry.counter(counterId).increment();\n    this.requestLatency.record(this.registry.hrtime(start));\n    this.responseSize.record(res.size);\n    return res;\n  }\n}\n\nconst config = getConfig();\nconst registry = new spectator.Registry(config);\n\nclass Request {\n  constructor(country) {\n    this.country = country;\n  }\n}\n\n// somehow get a request from the user...\nfunction getNextRequest() {\n  return new Request('AR');\n}\n\nfunction handleTermination() {\n  registry.stop();\n}\n\nprocess.on('SIGINT', handleTermination);\nprocess.on('SIGTERM', handleTermination);\n\nregistry.start();\n\nconst server = new Server(registry);\n\nfor (let i = 0; i < 3; ++i) {\n  const req = getNextRequest();\n  server.handle(req)\n}\n\nregistry.stop();\n
    "},{"location":"spectator/lang/nodejs/usage/#enable-runtime-metrics","title":"Enable Runtime Metrics","text":"
    'use strict';\n\nfunction getConfig() {\n}\n\nconst spectator = require('nflx-spectator');\nconst NodeMetrics = require('nflx-spectator-nodejsmetrics');\n\nconst config = {\n  commonTags: {'nf.node': 'i-1234'},\n  uri: 'http://atlas.example.org/v1/publish'\n};\nconst registry = new spectator.Registry(config);\nregistry.start();\n\nconst metrics = new NodeMetrics(registry);\nmetrics.start(); // start collecting nodejs metrics\n\n// ...\n\nmetrics.stop();\nregistry.stop();\n
    "},{"location":"spectator/lang/nodejs/usage/#netflix-integration","title":"Netflix Integration","text":"

    Create a Netflix Spectator Config to be used by spectator-js.

    Only applications should depend on the @netflix-internal/spectator-conf package. Libraries should get the Registry passed by the application, and therefore should only need to depend on spectator-js.

    Add the following dependencies to package.json:

    {\n  \"dependencies\": {\n    \"nflx-spectator\": \"*\",\n    \"nflx-spectator-nodejsmetrics\": \"*\",\n    \"@netflix-internal/spectator-conf\": \"*\"\n  }\n}\n

    This configuration also brings in spectator-js-nodejsmetrics to provide Node.js runtime metrics.

    You can override the logger used by the Spectator registry by setting the logger property. The specified logger should provide debug, info, and error methods. By default, spectator-js logs to stdout.

    const spectator = require('nflx-spectator');\nconst NodeMetrics = require('nflx-spectator-nodejsmetrics');\nconst getSpectatorConfig = require('@netflix-internal/spectator-conf');\nconst logger = require('pino')();\n\n//...\n\nconst registry = new spectator.Registry(getSpectatorConfig());\nregistry.logger = logger;\nregistry.start();\n\nconst metrics = new NodeMetrics(registry);\nmetrics.start();\n\nfunction handleTermination() {\n  metrics.stop();\n  registry.stop();\n}\n\nprocess.on('SIGINT', handleTermination);\nprocess.on('SIGTERM', handleTermination);\n\n//... your app\n\nhandleTermination();\n
    "},{"location":"spectator/lang/nodejs/ext/nodejs-cpu/","title":"CPU","text":"

    Node.js runtime CPU metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-cpu/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/nodejs/ext/nodejs-cpu/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-cpu/#nodejscpuusage","title":"nodejs.cpuUsage","text":"

    Percentage of CPU time the Node.js process is consuming, from 0..100.

    The usage is divided into the following categories:

    • system: CPU time spent running the kernel.
    • user: CPU time spent running user space (non-kernel) processes.

    Unit: percent

    Dimensions:

    • id: The category of CPU usage.

    Example:

    {\n  \"tags\": {\n    \"id\": \"system\",\n    \"name\": \"nodejs.cpuUsage\",\n    /// nf.* tags\n    \"nodejs.version\": \"v6.5.0\"\n  },\n  \"start\": 1485813720000,\n  \"value\": 0.8954088417692685\n},\n{\n  \"tags\": {\n    \"id\": \"user\",\n    \"name\": \"nodejs.cpuUsage\",\n    /// nf.* tags\n    \"nodejs.version\": \"v6.5.0\"\n  },\n  \"start\": 1485813720000,\n  \"value\": 4.659007745141895\n}\n
    "},{"location":"spectator/lang/nodejs/ext/nodejs-eventloop/","title":"Event Loop","text":"

    Node.js runtime event loop metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-eventloop/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/nodejs/ext/nodejs-eventloop/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-eventloop/#nodejseventloop","title":"nodejs.eventLoop","text":"

    The time it takes for the event loop to complete. This is sampled twice per second.

    Unit: seconds

    "},{"location":"spectator/lang/nodejs/ext/nodejs-eventloop/#nodejseventlooplag","title":"nodejs.eventLoopLag","text":"

    The time that the event loop is running behind, as measured by attempting to execute a timer once per second.

    Unit: seconds

    "},{"location":"spectator/lang/nodejs/ext/nodejs-filedescriptor/","title":"File Descriptor","text":"

    Node.js runtime file descriptor metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-filedescriptor/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/nodejs/ext/nodejs-filedescriptor/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-filedescriptor/#openfiledescriptorscount","title":"openFileDescriptorsCount","text":"

    Number of file descriptors currently open.

    Unit: file descriptors

    "},{"location":"spectator/lang/nodejs/ext/nodejs-filedescriptor/#maxfiledescriptorscount","title":"maxFileDescriptorsCount","text":"

    The maximum number of file descriptors that can be open at the same time.

    Unit: file descriptors

    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/","title":"Garbage Collection","text":"

    Node.js runtime garbage collection metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#nodejsgcallocationrate","title":"nodejs.gc.allocationRate","text":"

    The rate at which the app is allocating memory.

    Unit: bytes/second

    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#nodejsgclivedatasize","title":"nodejs.gc.liveDataSize","text":"

    The size of the old_space after a major GC event.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#nodejsgcmaxdatasize","title":"nodejs.gc.maxDataSize","text":"

    The maximum amount of memory the nodejs process is allowed to use. This is primarily used for gaining perspective on the liveDataSize.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#nodejsgcpause","title":"nodejs.gc.pause","text":"

    The time it takes to complete different GC events.

    Event categories:

    • scavenge: The most common garbage collection method. Node will typically trigger one of these every time the VM is idle.
    • markSweepCompact: The heaviest type of garbage collection V8 may do. If you see many of these happening you will need to either keep fewer objects around in your process or increase V8's heap limit.
    • incrementalMarking: A phased garbage collection that interleaves collection with application logic to reduce the amount of time the application is paused.
    • processWeakCallbacks: After a garbage collection occurs, V8 will call any weak reference callbacks registered for objects that have been freed. This measurement is from the start of the first weak callback to the end of the last for a given garbage collection.

    Unit: seconds

    Dimensions:

    • id: The GC event category.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-gc/#nodejsgcpromotionrate","title":"nodejs.gc.promotionRate","text":"

    The rate at which data is being moved from new_space to old_space.

    Unit: bytes/second

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/","title":"Heap","text":"

    Node.js runtime heap metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#metrics","title":"Metrics","text":"

    Data is gathered from the v8.getHeapStatistics method.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejsdoeszapgarbage","title":"nodejs.doesZapGarbage","text":"

    Whether or not the --zap_code_space option is enabled.

    This makes V8 overwrite heap garbage with a bit pattern. The RSS footprint (resident memory set) gets bigger because it continuously touches all heap pages and that makes them less likely to get swapped out by the operating system.

    Unit: boolean

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejsheapsizelimit","title":"nodejs.heapSizeLimit","text":"

    The absolute limit the heap cannot exceed (default limit or --max_old_space_size).

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejsmallocedmemory","title":"nodejs.mallocedMemory","text":"

    Current amount of memory, obtained via malloc.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejspeakmallocedmemory","title":"nodejs.peakMallocedMemory","text":"

    Peak amount of memory, obtained via malloc.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejstotalavailablesize","title":"nodejs.totalAvailableSize","text":"

    Available heap size.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejstotalheapsize","title":"nodejs.totalHeapSize","text":"

    Memory V8 has allocated for the heap. This can grow if usedHeap needs more.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejstotalheapsizeexecutable","title":"nodejs.totalHeapSizeExecutable","text":"

    Memory for compiled bytecode and JITed code.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejstotalphysicalsize","title":"nodejs.totalPhysicalSize","text":"

    Committed size.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heap/#nodejsusedheapsize","title":"nodejs.usedHeapSize","text":"

    Memory used by application data.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/","title":"Heap Space","text":"

    Node.js runtime heap space metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#metrics","title":"Metrics","text":"

    Data is gathered from the v8.getHeapSpaceStatistics method, for each space listed.

    Space categories:

    • new_space: Where new allocations happen; it is fast to allocate and collect garbage here. Objects living in the New Space are called the Young Generation.
    • old_space: Objects that survive the New Space collector are promoted here; they are called the Old Generation. Allocation in the Old Space is fast, but collection is expensive so it is less frequently performed.
    • code_space: Contains executable code and therefore is marked executable.
    • map_space: Contains map objects only.
    • large_object_space: Contains promoted large objects which exceed the size limits of other spaces. Each object gets its own mmap region of memory and these objects are never moved by GC.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#nodejsspacesize","title":"nodejs.spaceSize","text":"

    The allocated size of the space.

    Unit: bytes

    Dimensions:

    • id: Space category.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#nodejsspaceusedsize","title":"nodejs.spaceUsedSize","text":"

    The used size of the space.

    Unit: bytes

    Dimensions:

    • id: Space category.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#nodejsspaceavailablesize","title":"nodejs.spaceAvailableSize","text":"

    The available size of the space.

    Unit: bytes

    Dimensions:

    • id: Space category.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-heapspace/#nodejsphysicalspacesize","title":"nodejs.physicalSpaceSize","text":"

    The physical size of the space.

    Unit: bytes

    Dimensions:

    • id: Space category.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/","title":"Memory","text":"

    Node.js runtime memory metrics, provided by spectator-js-nodejsmetrics.

    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#metrics","title":"Metrics","text":""},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#common-dimensions","title":"Common Dimensions","text":"

    The following dimensions are common to the metrics published by this module:

    • nodejs.version: The version of the Node.js runtime.
    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#nodejsrss","title":"nodejs.rss","text":"

    Resident Set Size, which is the total memory allocated for the process execution. This includes the Code Segment, Stack (local variables and pointers) and Heap (objects and closures).

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#nodejsheaptotal","title":"nodejs.heapTotal","text":"

    Total size of the allocated heap.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#nodejsheapused","title":"nodejs.heapUsed","text":"

    Memory used during the execution of our process.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/ext/nodejs-memory/#nodejsexternal","title":"nodejs.external","text":"

    Memory usage of C++ objects bound to JavaScript objects managed by V8.

    Unit: bytes

    "},{"location":"spectator/lang/nodejs/meters/counter/","title":"Counter","text":"

    TBD

    "},{"location":"spectator/lang/nodejs/meters/dist-summary/","title":"Distribution Summary","text":"

    TBD

    "},{"location":"spectator/lang/nodejs/meters/gauge/","title":"Gauge","text":"

    TBD

    "},{"location":"spectator/lang/nodejs/meters/percentile-timer/","title":"Percentile Timer","text":"

    TBD

    "},{"location":"spectator/lang/nodejs/meters/timer/","title":"Timer","text":"

    TBD

    "},{"location":"spectator/lang/py/migrations/","title":"Migrations","text":""},{"location":"spectator/lang/py/migrations/#migrating-from-02-to-10","title":"Migrating from 0.2 to 1.0","text":"

    Version 1.0 consists of a major rewrite that cleans up and simplifies the spectator-py thin client API. It is designed to send metrics through spectatord. As a result, some functionality has been moved to other modules, or removed.

    "},{"location":"spectator/lang/py/migrations/#new","title":"New","text":""},{"location":"spectator/lang/py/migrations/#config","title":"Config","text":"
    • Replace the SidecarConfig with Config, and simplify usage.
    • The location configuration is clarified, with a default set to the spectatord UDP port, and a new option for picking the default Unix Domain Socket for spectatord.
    • The extra_common_tags concept is clarified. Any extra common tags provided through the Config object are merged with two process-specific tags that may be present in environment variables.
    • Any MeterId or Meter objects created through Registry methods will contain these extra tags.
    "},{"location":"spectator/lang/py/migrations/#meters","title":"Meters","text":"
    • The AgeGauge meter added a now() method, which sets 0 as the value, so you do not need to remember this special value.
    • Add MonotonicCounterUint with a c_uint64 data type, to support uint64 data types. These are not commonly encountered, as they usually only show up in networking metrics, such as bytes/sec in high-volume contexts. When you need it, you need it; otherwise, it can be ignored.
    • The MonotonicCounter with a float data type continues to exist, for the more common use case.
    • Note that monotonic counters are convenience meter types provided by spectatord, because they help you avoid the work of tracking previous values and calculating deltas.
    "},{"location":"spectator/lang/py/migrations/#registry","title":"Registry","text":"
    • Add a new_id() method and *_with_id() methods for all meter types, to support more complex tag operations related to MeterId objects. This follows the way they work in the other clients.
    "},{"location":"spectator/lang/py/migrations/#moved","title":"Moved","text":""},{"location":"spectator/lang/py/migrations/#meters_1","title":"Meters","text":"
    • Separate classes for each Meter type. Relocated to a new module, spectator.meter.
    "},{"location":"spectator/lang/py/migrations/#stopwatch","title":"StopWatch","text":"
    • The StopWatch context manager is no longer part of the Timer class; it is now a standalone class. It has been preserved, because it continues to fulfill the purpose of simplifying how Timer and PercentileTimer meters record their values after exiting a block of code, and there are a few uses of this class across the organization.

    Before:

    import time\nfrom spectator import GlobalRegistry\n\nserver_latency = GlobalRegistry.pct_timer(\"serverLatency\")\n\nwith server_latency.stopwatch():\n    time.sleep(5)\n

    After:

    import time\nfrom spectator.registry import Registry\nfrom spectator.stopwatch import StopWatch\n\nregistry = Registry()\nserver_latency = registry.pct_timer(\"serverLatency\")\n\nwith StopWatch(server_latency):\n    time.sleep(5)\n
    "},{"location":"spectator/lang/py/migrations/#writers","title":"Writers","text":"
    • Separate classes for each Writer type. Relocated to a new module, spectator.writer.
    "},{"location":"spectator/lang/py/migrations/#removed","title":"Removed","text":"
    • All remnants of the previous thick-client API.
    "},{"location":"spectator/lang/py/migrations/#deprecated","title":"Deprecated","text":"
    • The GlobalRegistry is a hold-over from the thick-client version of this library, but it has been maintained to help minimize the amount of code change that application owners need to implement when adopting the thin-client version of the library. Replace with direct use of Registry.
    • There are no plans to remove the GlobalRegistry, until we know that all uses have been removed.

    Before:

    from spectator import GlobalRegistry\n\nGlobalRegistry.gauge(\"server.queueSize\", ttl_seconds=120).set(10)\n

    After:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.gauge(\"server.queueSize\", ttl_seconds=120).set(10)\n
    "},{"location":"spectator/lang/py/migrations/#migrating-from-01-to-02","title":"Migrating from 0.1 to 0.2","text":"
    • This library no longer publishes directly to the Atlas backends. It now publishes to the [SpectatorD] sidecar which is bundled with all standard AMIs and containers. If you must have the previous direct publishing behavior, because SpectatorD is not yet available on the platform where your code runs, then you can pin to version 0.1.18.
    • The internal Netflix configuration companion library is no longer required and this dependency may be dropped from your project.
    • The API surface area remains unchanged to avoid breaking library consumers, and standard uses of GlobalRegistry helper methods for publishing metrics continue to work as expected. Several helper methods on meter classes are now no-ops, always returning values such as 0 or nan. If you want to write tests to validate metrics publication, take a look at the tests in this library for a few examples of how that can be done. The core idea is to capture the lines which will be written out to SpectatorD.
    • Replace uses of PercentileDistributionSummary with direct use of the Registry pct_distribution_summary method.

      # before\nfrom spectator import GlobalRegistry\nfrom spectator.histogram import PercentileDistributionSummary\n\nd = PercentileDistributionSummary(GlobalRegistry, \"server.requestSize\")\nd.record(10)\n
      # after\nfrom spectator import GlobalRegistry\n\nGlobalRegistry.pct_distribution_summary(\"server.requestSize\").record(10)\n
    • Replace uses of PercentileTimer with direct use of the Registry pct_timer method.

      # before\nfrom spectator import GlobalRegistry\nfrom spectator.histogram import PercentileTimer\n\nt = PercentileTimer(GlobalRegistry, \"server.requestSize\")\nt.record(0.01)\n
      # after\nfrom spectator import GlobalRegistry\n\nGlobalRegistry.pct_timer(\"server.requestSize\").record(0.01)\n
    • Implemented new meter types supported by [SpectatorD]: age_gauge, max_gauge and monotonic_counter. See the SpectatorD documentation or the class docstrings for more details.

    "},{"location":"spectator/lang/py/usage/","title":"spectator-py Usage","text":"

    Python thin-client metrics library for use with Atlas and SpectatorD.

    "},{"location":"spectator/lang/py/usage/#supported-python-versions","title":"Supported Python Versions","text":"

    This library currently targets Python >= 3.8.

    "},{"location":"spectator/lang/py/usage/#installing","title":"Installing","text":"

    Install this library for your project as follows:

    pip install netflix-spectator-py\n
    "},{"location":"spectator/lang/py/usage/#instrumenting-code","title":"Instrumenting Code","text":"
    import logging\n\nfrom flask import Flask, request, Response\nfrom flask.logging import default_handler\nfrom spectator.config import Config\nfrom spectator.registry import Registry\nfrom spectator.stopwatch import StopWatch\n\nroot_logger = logging.getLogger()\nroot_logger.setLevel(logging.DEBUG)\nroot_logger.addHandler(default_handler)\n\nconfig = Config(location=\"none\", extra_common_tags={\"nf.platform\": \"my_platform\"})\nregistry = Registry(config)\n\nrequest_count_id = registry.new_id(\"server.requestCount\", {\"version\": \"v1\"})\nrequest_latency = registry.timer(\"server.requestLatency\")\nresponse_size = registry.distribution_summary(\"server.responseSize\")\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef root():\n    return Response(\"Usage: /api/v1/play?country=foo&title=bar\")\n\n\n@app.route(\"/api/v1/play\", methods=[\"GET\", \"POST\"])\ndef play():\n    if request.method == \"GET\":\n        with StopWatch(request_latency):\n            status_code = 200\n            country = request.args.get(\"country\", default=\"none\")\n            title = request.args.get(\"title\", default=\"none\")\n\n            tags = {\"country\": country, \"title\": title, \"status\": str(status_code)}\n            request_count_with_tags = request_count_id.with_tags(tags)\n            counter = registry.counter_with_id(request_count_with_tags)\n            counter.increment()\n\n            message = f\"requested play for country={country} title={title}\"\n            response_size.record(len(message))\n            return Response(message, status=status_code)\n    else:\n        status_code = 405\n\n        tags = {\"status\": str(status_code)}\n        request_count_with_tags = request_count_id.with_tags(tags)\n        counter = registry.counter_with_id(request_count_with_tags)\n        counter.increment()\n\n        return Response(\"unsupported request method\", status=status_code)\n

    Save this snippet as app.py, then flask --app app run.

    "},{"location":"spectator/lang/py/usage/#importing","title":"Importing","text":""},{"location":"spectator/lang/py/usage/#standard-usage","title":"Standard Usage","text":"

    Instantiate a Registry object, with either a default or custom Config, and use it to create and manage MeterId and Meter objects.

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.counter(\"server.requestCount\").increment()\n
    "},{"location":"spectator/lang/py/usage/#legacy-usage","title":"Legacy Usage","text":"

    The GlobalRegistry concept is a hold-over from the thick-client version of this library, but it has been maintained to help minimize the amount of code change that application owners need to implement when adopting the thin client version of the library. It existed as a concept in the thick client because it was stateful, and required starting background threads. The thin client version is stateless.

    Importing the GlobalRegistry instantiates a Registry with a default Config that applies process-specific common tags based on environment variables and opens a UDP socket to the local SpectatorD agent. The remainder of the instance-specific common tags are provided by SpectatorD. Once imported, the GlobalRegistry can be used to create and manage Meters.

    from spectator import GlobalRegistry\n\nGlobalRegistry.counter(\"server.requestCount\").increment()\n
    "},{"location":"spectator/lang/py/usage/#logging","title":"Logging","text":"

    This package provides the following loggers:

    • spectator.meter.meter_id, which reports invalid tags at WARNING level.
    • spectator.registry, which reports Registry status messages at INFO level, and errors closing writers at ERROR level.
    • spectator.writer, which reports the protocol lines written at DEBUG level, and writing errors at ERROR level.

    When troubleshooting metrics collection and reporting, you should set the spectator.meter.meter_id logger to DEBUG level, before the first metric is recorded. For example:

    import logging\n\n# record the human-readable time, name of the logger, logging level, thread id and message\nlogging.basicConfig(\n    level=logging.DEBUG,\n    format='%(asctime)s - %(name)s - %(levelname)s - %(thread)d - %(message)s'\n)\n\nlogging.getLogger('spectator.meter.meter_id').setLevel(logging.DEBUG)\n
    "},{"location":"spectator/lang/py/usage/#working-with-meterid-objects","title":"Working with MeterId Objects","text":"

    Each metric stored in Atlas is uniquely identified by the combination of the name and the tags associated with it. In spectator-py, this data is represented with MeterId objects, created by the Registry. The new_id() method returns new MeterId objects, which have extra common tags applied, and which can be further customized by calling the with_tag() and with_tags() methods. Each MeterId will create and store a validated subset of the spectatord protocol line to be written for each Meter, when it is instantiated. MeterId objects are immutable, so they can be freely passed around and used concurrently. Manipulating the tags with the provided methods will create new MeterId objects, to assist with maintaining immutability.

    Note that all tag keys and values must be strings. For example, if you want to keep track of the number of successful requests, then you must cast integers to strings. The MeterId class will validate these values, dropping or changing any that are not valid, and reporting a warning log.

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.counter(\"server.numRequests\", {\"statusCode\": str(200)}).increment()\n\nnum_requests_id = registry.new_id(\"server.numRequests\", {\"statusCode\": str(200)})\nregistry.counter_with_id(num_requests_id).increment()\n

    Atlas metrics will be consumed by users many times after the data has been reported, so they should be chosen thoughtfully, while considering how they will be used. See the naming conventions page for general guidelines on metrics naming and restrictions.

    "},{"location":"spectator/lang/py/usage/#meter-types","title":"Meter Types","text":"
    • Age Gauge
    • Counter
    • Distribution Summary
    • Gauge
    • Max Gauge
    • Monotonic Counter
    • Monotonic Counter Uint
    • Percentile Distribution Summary
    • Percentile Timer
    • Timer
    "},{"location":"spectator/lang/py/usage/#asyncio-support","title":"asyncio Support","text":"

    The Registry provides a UdpWriter by default. UDP is a non-blocking, unordered and connectionless protocol, which is ideal for communicating with a local SpectatorD process in a variety of circumstances. The UdpWriter should be used in asyncio applications.

    The FileWriter implementation, which can be used to communicate with the SpectatorD Unix domain socket, for slightly higher performance, does not offer asyncio support at this time.

    "},{"location":"spectator/lang/py/usage/#ipv6-support","title":"IPv6 Support","text":"

    By default, SpectatorD will listen on IPv6 UDP *:1234, without setting the v6_only(true) flag. On dual-stacked systems, this means that it will receive packets from both IPv4 and IPv6, and the IPv4 addresses will show up on the server as IPv4-mapped IPv6 addresses.

    By default, the UdpWriter will send UDP packets to 127.0.0.1:1234, which will allow for communication with SpectatorD on dual-stacked systems.

    On IPv6-only systems, it may be necessary to change the default configuration using one of the following methods:

    • Configure the following environment variable, which will override the default location Config in the Registry:
    export SPECTATOR_OUTPUT_LOCATION=\"udp://[::1]:1234\"\n
    • Provide a custom Config for the Registry:
    from spectator.config import Config\nfrom spectator.registry import Registry\n\nconfig = Config(location=\"udp://[::1]:1234\")\nregistry = Registry(config)\nregistry.counter(\"server.numRequests\").increment()\n
    "},{"location":"spectator/lang/py/usage/#writing-tests","title":"Writing Tests","text":"

    To write tests against this library, instantiate an instance of the Registry and provide a Config that selects the MemoryWriter. This Writer stores all updates in a List[str]. Use the writer() method on the Registry to access the writer, then inspect the last_line() or get() all messages to verify your metrics updates.

    import unittest\n\nfrom spectator.config import Config\nfrom spectator.registry import Registry\n\nclass MetricsTest(unittest.TestCase):\n\n    def test_counter(self):\n        r = Registry(Config(\"memory\"))\n\n        c = r.counter(\"server.numRequests\")\n        self.assertTrue(r.writer().is_empty())\n\n        c.increment()\n        self.assertEqual(\"c:server.numRequests:1\", r.writer().last_line())\n
    "},{"location":"spectator/lang/py/usage/#overriding-output-location","title":"Overriding Output Location","text":"

    If you need to override the default output location (UDP) of the Registry, then you can set a Config class location to one of the following supported values:

    • none - Disable output.
    • memory - Write to memory.
    • stderr - Write to standard error for the process.
    • stdout - Write to standard out for the process.
    • udp - Write to the default UDP port for spectatord.
    • unix - Write to the default unix datagram socket for spectatord.
    • file://$path_to_file - Write to a custom file (e.g. file:///tmp/foo/bar).
    • udp://$host:$port - Write to a custom UDP socket.

    The SPECTATOR_OUTPUT_LOCATION environment variable accepts the same values, and can be used to override the value provided to the Config class, which may be useful in CI/CD contexts. For example, if you want to disable metrics publishing from the Registry, then you can set:

    export SPECTATOR_OUTPUT_LOCATION=none\n
    "},{"location":"spectator/lang/py/usage/#protocol-parser","title":"Protocol Parser","text":"

    A SpectatorD line protocol parser is available, which can be used for validating the results captured by a MemoryWriter.

    import unittest\n\nfrom spectator.meter.counter import Counter\nfrom spectator.protocol_parser import get_meter_class, parse_protocol_line\n\nclass ProtocolParserTest(unittest.TestCase):\n\n    def test_parse_counter_with_multiple_tags(self):\n        symbol, id, value = parse_protocol_line(\"c:counter,foo=bar,baz=quux:1\")\n        self.assertEqual(\"c\", symbol)\n        self.assertEqual(Counter, get_meter_class(symbol))\n        self.assertEqual(\"counter\", id.name())\n        self.assertEqual({\"foo\": \"bar\", \"baz\": \"quux\"}, id.tags())\n        self.assertEqual(\"1\", value)\n
    "},{"location":"spectator/lang/py/meters/age-gauge/","title":"Age Gauge","text":"

    The value is the time in seconds since the epoch at which an event has successfully occurred, or 0 to use the current time in epoch seconds. After an Age Gauge has been set, it will continue reporting the number of seconds since the last time recorded, for as long as the SpectatorD process runs. The purpose of this metric type is to enable users to more easily implement the Time Since Last Success alerting pattern.

    To set a specific time as the last success:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.age_gauge(\"time.sinceLastSuccess\").set(1611081000)\n\nlast_success = registry.new_id(\"time.sinceLastSuccess\")\nregistry.age_gauge_with_id(last_success).set(1611081000)\n

    To set now() as the last success:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.age_gauge(\"time.sinceLastSuccess\").now()\n\nlast_success = registry.new_id(\"time.sinceLastSuccess\")\nregistry.age_gauge_with_id(last_success).now()\n

    By default, a maximum of 1000 Age Gauges are allowed per spectatord process, because there is no mechanism for cleaning them up. This value may be tuned with the --age_gauge_limit flag on the spectatord binary.

    Since Age Gauges are long-lived entities that reside in the memory of the SpectatorD process, if you need to delete and re-create them for any reason, then you can use the SpectatorD admin server to accomplish this task. You can delete all Age Gauges or a single Age Gauge.

    Example:

    curl -X DELETE \\\nhttp://localhost:1234/metrics/A\n
    curl -X DELETE \\\nhttp://localhost:1234/metrics/A/fooIsTheName,some.tag=val1,some.otherTag=val2\n
    "},{"location":"spectator/lang/py/meters/counter/","title":"Counter","text":"

    A Counter is used to measure the rate at which an event is occurring. Considering an API endpoint, a Counter could be used to measure the rate at which it is being accessed.

    Counters are reported to the backend as a rate-per-second. In Atlas, the :per-step operator can be used to convert them back into a value-per-step on a graph.

    Call increment() when an event occurs:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.counter(\"server.numRequests\").increment()\n\nnum_requests = registry.new_id(\"server.numRequests\")\nregistry.counter_with_id(num_requests).increment()\n

    You can also pass a value to increment(). This is useful when a collection of events happens together:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.counter(\"queue.itemsAdded\").increment(10)\n\nnum_requests = registry.new_id(\"server.numRequests\")\nregistry.counter_with_id(num_requests).increment(10)\n
    "},{"location":"spectator/lang/py/meters/dist-summary/","title":"Distribution Summary","text":"

    A Distribution Summary is used to track the distribution of events. It is similar to a Timer, but more general, in that the size does not have to be a period of time. For example, a Distribution Summary could be used to measure the payload sizes of requests hitting a server.

    Always use base units when recording data, to ensure that the tick labels presented on Atlas graphs are readable. If you are measuring payload size, then use bytes, not kilobytes (or some other unit). This means that a 4K tick label will represent 4 kilobytes, rather than 4 kilo-kilobytes.

    Call record() with a value:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.distribution_summary(\"server.requestSize\").record(10)\n\nrequest_size = registry.new_id(\"server.requestSize\")\nregistry.distribution_summary_with_id(request_size).record(10)\n
    "},{"location":"spectator/lang/py/meters/gauge/","title":"Gauges","text":"

    A gauge is a value that is sampled at some point in time. Typical examples for gauges would be the size of a queue or number of threads in a running state. Since gauges are not updated inline when a state change occurs, there is no information about what might have occurred between samples.

    Consider monitoring the behavior of a queue of tasks. If the data is being collected once a minute, then a gauge for the size will show the size when it was sampled. The size may have been much higher or lower at some point during the interval, but that is not known.

    Call set() with a value:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.gauge(\"server.queueSize\").set(10)\n\nqueue_size = registry.new_id(\"server.queueSize\")\nregistry.gauge_with_id(queue_size).set(10)\n

    Gauges will report the last set value for 15 minutes. This is done so that updates to the values do not need to be collected on a tight 1-minute schedule to ensure that Atlas shows unbroken lines in graphs. A custom TTL may be configured for gauges. SpectatorD enforces a minimum TTL of 5 seconds.

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.gauge(\"server.queueSize\", ttl_seconds=120).set(10)\n\nqueue_size = registry.new_id(\"server.queueSize\")\nregistry.gauge_with_id(queue_size, ttl_seconds=120).set(10)\n
    "},{"location":"spectator/lang/py/meters/max-gauge/","title":"Max Gauge","text":"

    The value is a number that is sampled at a point in time, but it is reported as a maximum Gauge value to the backend. This ensures that only the maximum value observed during a reporting interval is sent to the backend, thus over-riding the last-write-wins semantics of standard Gauges. Unlike standard Gauges, Max Gauges do not continue to report to the backend, and there is no TTL.

    Call set() with a value:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.max_gauge(\"server.queueSize\").set(10)\n\nqueue_size = registry.new_id(\"server.queueSize\")\nregistry.max_gauge_with_id(queue_size).set(10)\n
    "},{"location":"spectator/lang/py/meters/monotonic-counter-uint/","title":"Monotonic Counter Uint","text":"

    A Monotonic Counter (uint64) is used to measure the rate at which an event is occurring, when the source data is a monotonically increasing number. A minimum of two samples must be sent, in order to calculate a delta value and report it to the backend as a rate-per-second. A variety of networking metrics may be reported monotonically, and this metric type provides a convenient means of recording these values, at the expense of a slower time-to-first metric.

    Call set() when an event occurs:

    from ctypes import c_uint64\nfrom spectator.registry import Registry\n\nregistry = Registry()\nregistry.monotonic_counter_uint(\"iface.bytes\").set(c_uint64(1))\n\niface_bytes = registry.new_id(\"iface.bytes\")\nregistry.monotonic_counter_uint_with_id(iface_bytes).set(c_uint64(1))\n
    "},{"location":"spectator/lang/py/meters/monotonic-counter/","title":"Monotonic Counter","text":"

    A Monotonic Counter (float) is used to measure the rate at which an event is occurring, when the source data is a monotonically increasing number. A minimum of two samples must be sent, in order to calculate a delta value and report it to the backend as a rate-per-second. A variety of networking metrics may be reported monotonically, and this metric type provides a convenient means of recording these values, at the expense of a slower time-to-first metric.

    Call set() when an event occurs:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.monotonic_counter(\"iface.bytes\").set(10)\n\niface_bytes = registry.new_id(\"iface.bytes\")\nregistry.monotonic_counter_with_id(iface_bytes).set(10)\n
    "},{"location":"spectator/lang/py/meters/percentile-dist-summary/","title":"Percentile Distribution Summary","text":"

    The value tracks the distribution of events, with percentile estimates. It is similar to a PercentileTimer, but more general, because the size does not have to be a period of time.

    For example, it can be used to measure the payload sizes of requests hitting a server or the number of records returned from a query.

    In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Distribution Summary. Be diligent about any additional dimensions added to Percentile Distribution Summaries and ensure that they have a small bounded cardinality.

    Call record() with a value:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.pct_distribution_summary(\"server.requestSize\").record(10)\n\nrequest_size = registry.new_id(\"server.requestSize\")\nregistry.pct_distribution_summary_with_id(request_size).record(10)\n
    "},{"location":"spectator/lang/py/meters/percentile-timer/","title":"Percentile Timer","text":"

    The value is the number of seconds that have elapsed for an event, with percentile estimates.

    This metric type will track the data distribution by maintaining a set of Counters. The distribution can then be used on the server side to estimate percentiles, while still allowing for arbitrary slicing and dicing based on dimensions.

    In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Timer. Be diligent about any additional dimensions added to Percentile Timers and ensure that they have a small bounded cardinality.

    Call record() with a value:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.pct_timer(\"server.requestLatency\").record(0.01)\n\nrequest_latency = registry.new_id(\"server.requestLatency\")\nregistry.pct_timer_with_id(request_latency).record(0.01)\n

    A StopWatch class is available, which may be used as a Context Manager to automatically record the number of seconds that have elapsed while executing a block of code:

    import time\nfrom spectator.registry import Registry\nfrom spectator.stopwatch import StopWatch\n\nregistry = Registry()\nthread_sleep = registry.pct_timer(\"thread.sleep\")\n\nwith StopWatch(thread_sleep):\n    time.sleep(5)\n
    "},{"location":"spectator/lang/py/meters/timer/","title":"Timer","text":"

    A Timer is used to measure how long (in seconds) some event is taking.

    Call record() with a value:

    from spectator.registry import Registry\n\nregistry = Registry()\nregistry.timer(\"server.requestLatency\").record(0.01)\n\nrequest_latency = registry.new_id(\"server.requestLatency\")\nregistry.timer_with_id(request_latency).record(0.01)\n

    A StopWatch class is available, which may be used as a Context Manager to automatically record the number of seconds that have elapsed while executing a block of code:

    import time\nfrom spectator.registry import Registry\nfrom spectator.stopwatch import StopWatch\n\nregistry = Registry()\nthread_sleep = registry.timer(\"thread.sleep\")\n\nwith StopWatch(thread_sleep):\n    time.sleep(5)\n
    "},{"location":"spectator/lang/rb/deprecated/","title":"spectator-rb Usage","text":"

    This client library is deprecated, does not support spectatord, and it is no longer maintained.

    You should move to a Paved Path language as soon as possible.

    "},{"location":"spectator/patterns/cardinality-limiter/","title":"Cardinality Limiter","text":"

    Helper functions to help manage the cardinality of tag values. This should be used anywhere you cannot guarantee that the tag values being used are strictly bounded. There is support for two different modes: (1) selecting the first N values that are seen, or (2) selecting the most frequent N values that are seen.

    Example usage:

    class WebServer {\n\n  // Limiter instance, should be shared for all uses of that tag value\n  private final Function&lt;String, String&gt; pathLimiter =\n    CardinalityLimiters.mostFrequent(10);\n\n  private final Registry registry;\n  private final Id baseId;\n\n  public WebServer(Registry registry) {\n    this.registry = registry;\n    this.baseId = registry.createId(\"server.requestCount\");\n  }\n\n  public Response handleRequest(Request req) {\n    Response res = doSomething(req);\n\n    // Update metrics, use limiter to restrict the set of values for the\n    // path and avoid an explosion\n    String pathValue = pathLimiter.apply(req.getPath());\n    Id id = baseId\n      .withTag(\"path\", pathValue)\n      .withTag(\"status\", res.getStatus());\n    registry.counter(id).increment();\n  }\n}\n
    "},{"location":"spectator/patterns/gauge-poller/","title":"Gauge Poller","text":"

    Helper for polling gauges in a background thread. A shared executor is used with a single thread. If registered gauge methods are cheap as they should be, then this should be plenty of capacity to process everything regularly. If not, then this will help limit the damage to a single core and avoid causing problems for the application.

    "},{"location":"spectator/patterns/interval-counter/","title":"Interval Counter","text":"

    A counter that also keeps track of the time since last update.

    "},{"location":"spectator/patterns/long-task-timer/","title":"Long Task Timer","text":"

    Timer intended to track a small number of long running tasks. Example would be something like a batch hadoop job. Though \"long running\" is a bit subjective, the assumption is that anything over a minute is long running.

    A regular Timer just records the duration and has no information until the task is complete.

    As an example, consider a chart showing request latency to a typical web server. The expectation is many short requests, so the timer will be getting updated many times per second.

    Now consider a background process to refresh metadata from a data store. For example, Edda caches AWS resources such as instances, volumes, auto-scaling groups etc. Normally, all data can be refreshed in a few minutes. If the AWS services are having problems, it can take much longer. A long duration timer can be used to track the overall time for refreshing the metadata.

    The charts below show max latency for the refresh using a regular timer and a long task timer. Regular timer, note that the y-axis is using a logarithmic scale:

    Long Task Timer:

    "},{"location":"spectator/patterns/percentile-timer/","title":"Percentile Timers","text":"

    A Timer that buckets the counts, to allow for estimating percentiles. This Timer type will track the data distribution for the timer by maintaining a set of Counters. The distribution can then be used on the server side to estimate percentiles, while still allowing for arbitrary slicing and dicing based on dimensions.

    Warning

    Please be selective about what you measure as there is significant overhead on both the client and storage side. Usually only one or two key performance indicators (KPIs) per application. Limit the tag cardinality as much as possible. For example, only include an endpoint tag, not a user agent or response code. Use one of the other meter types whenever possible.

    In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Timer. Be diligent about any additional dimensions added to Percentile Timers and ensure that they have a small bounded cardinality. In addition, it is highly recommended to set a range, whenever possible, to restrict the worst case overhead.

    When using the builder, the range will default from 10 ms to 1 minute. Based on data at Netflix, this is the most common range for request latencies and restricting to this window reduces the worst case multiple from 276X to 58X.

    "},{"location":"spectator/patterns/percentile-timer/#range-recommendations","title":"Range Recommendations","text":"

    The range should be the SLA boundary or failure point for the activity. Explicitly setting the range allows us to optimize for the important range of values and reduce the overhead associated with tracking the data distribution.

    For example, suppose you are making a client call and timeout after 10 seconds. Setting the range to 10 seconds will restrict the possible set of buckets used to those approaching the boundary. So we can still detect if it is nearing failure, but percentiles that are further away from the range may be inflated compared to the actual value.

    "},{"location":"spectator/patterns/percentile-timer/#bucket-distribution","title":"Bucket Distribution","text":"

    The set of buckets is generated by using powers of 4 and incrementing by one-third of the previous power of 4 in between as long as the value is less than the next power of 4 minus the delta.

    Base: 1, 2, 3\n\n4 (4^1), delta = 1\n    5, 6, 7, ..., 14,\n\n16 (4^2), delta = 5\n   21, 26, 31, ..., 56,\n\n64 (4^3), delta = 21\n...\n
    "},{"location":"spectator/patterns/polled-meter/","title":"Polled Meter","text":"

    Helper for configuring a meter that will receive a value by regularly polling the source in the background.

    Example usage:

    Registry registry = ...\nAtomicLong connections = PolledMeter.using(registry)\n  .withName(\"server.currentConnections\")\n  .monitorValue(new AtomicLong());\n\n// When a connection is added\nconnections.incrementAndGet();\n\n// When a connection is removed\nconnections.decrementAndGet();\n

    Polling frequency will depend on the underlying Registry implementation, but users should assume it will be frequently checked and that the provided function is cheap. Users should keep in mind that polling will not capture all activity, just sample it at some frequency. For example, if monitoring a queue, then a meter will only tell you the last sampled size when the value is reported. If more details are needed, then use an alternative type and ensure that all changes are reported when they occur.

    For example, consider tracking the number of currently established connections to a server. Using a polled meter will show the last sampled number when reported. An alternative would be to report the number of connections to a Distribution Summary every time a connection is added or removed. The distribution summary would provide more accurate tracking such as max and average number of connections across an interval of time. The polled meter would not provide that level of detail.

    If multiple values are monitored with the same id, then the values will be aggregated and the sum will be reported. For example, registering multiple meters for active threads in a thread pool with the same id would produce a value that is the overall number of active threads. For other behaviors, manage it on the user side and avoid multiple registrations.

    "},{"location":"spectator/specs/ipc/","title":"IPC","text":"

    This is a description of the Common IPC Metrics that can be published by various IPC libraries, with the goal of allowing consolidated monitoring and analysis across differing IPC implementations.

    "},{"location":"spectator/specs/ipc/#dimensions-common-to-all-metrics","title":"Dimensions Common to All Metrics","text":"

    Not all dimensions are applicable for all of the metrics, and later in the sections for each specific metric, the applicable dimensions are specified.

    Also note that not all dimensions have been implemented or are applicable for all implementations.

    • ipc.protocol: A short name of the network protocol in use, eg. grpc, http_1, http_2, udp, etc ...
    • ipc.vip: The Eureka VIP address used to find the server.
    • ipc.result: Was this considered by the implementation to be successful. Allowed Values = [success, failure].
    • ipc.status: One of a predefined list of status values indicating the general result, eg. success, bad_request, timeout, etc\u2026 See the ipc.status values section below.
    • ipc.status.detail: For cases where the ipc.status needs to be further subdivided, this tag can hold an additional more specific detail, likely ipc-implementation specific. eg status of connection_error and detail of no_servers / connect_timeout / ssl_handshake_failure.
    • ipc.failure.injected: Indicates that an artificial failure was injected into the request processing for testing purposes. The outcome of that failure will be reflected in the other error tags. Allowed Values = [true]
    • ipc.endpoint: The name of the endpoint/function/feature the message was sent to within the server (eg. the URL path prefix for a java servlet, or the grpc endpoint name).
    • ipc.attempt: Which attempt at sending this message is this. Allowed Values = [initial, second, third_up] (initial is the first attempt, second is 2nd attempt but first retry, third_up means third or higher attempt).
    • ipc.attempt.final: Indicates if this request was the final attempt of potentially multiple retry attempts. Allowed Values = [true, false].
    • ipc.server.app: The nf.app of the server the message is being sent to.
    • ipc.server.cluster: The nf.cluster of the server the message is being sent to.
    • ipc.server.asg: The nf.asg of the server the message is being sent to.
    • ipc.client.app: The nf.app of the server the message is being sent from.
    • ipc.client.cluster: The nf.cluster of the server the message is being sent from.
    • ipc.client.asg: The nf.asg of the server the message is being sent from.
    • owner: The library/impl publishing the metrics, eg. evcache, zuul, grpc, nodequark, platform_1_ipc, geoclient, etc ...
    • id: Conceptual name of service. Equivalent of RestClient name in NIWS.
    "},{"location":"spectator/specs/ipc/#allowed-values-for-ipcstatus-dimension","title":"Allowed Values for ipc.status Dimension","text":"
    • success: The request was successfully processed and responded to, as far as the client or server know.
    • bad_request: There was a problem with the client's request causing it not to be fulfilled.
    • unexpected_error: The client or server encountered an unexpected error processing the request.
    • connection_error: There was an error with the underlying network connection either during establishment or while in use.
    • unavailable: There were no servers available to process the request.
    • throttled: The request was rejected due to the client or server considering the server to be above capacity.
    • timeout: The request could not or would not be complete within the configured threshold (either on client or server).
    • cancelled: The client cancelled the request before it was completed.
    • access_denied: The request was denied access for authentication or authorization reasons.
    "},{"location":"spectator/specs/ipc/#server-metrics","title":"Server Metrics","text":""},{"location":"spectator/specs/ipc/#ipcservercall","title":"ipc.server.call","text":"

    This is a percentile timer that is recorded for each inbound message to a server.

    Unit: seconds

    Dimensions:

    • ipc.protocol
    • ipc.result
    • ipc.vip
    • ipc.endpoint
    • ipc.status
    • ipc.status.detail
    • ipc.failure.injected
    • ipc.attempt
    • ipc.client.app
    • ipc.client.cluster
    • ipc.client.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcservercallsizeinbound","title":"ipc.server.call.size.inbound","text":"

    This is a distribution summary of the size in bytes of inbound messages received by a server.

    Unit: bytes

    Dimensions:

    • ipc.protocol
    • ipc.vip
    • ipc.endpoint
    • ipc.result
    • ipc.status
    • ipc.status.detail
    • ipc.client.app
    • ipc.client.cluster
    • ipc.client.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcservercallsizeoutbound","title":"ipc.server.call.size.outbound","text":"

    This is a distribution summary of the size in bytes of outbound messages sent from a server.

    Unit: bytes

    Dimensions:

    • ipc.protocol
    • ipc.vip
    • ipc.endpoint
    • ipc.result
    • ipc.status
    • ipc.status.detail
    • ipc.client.app
    • ipc.client.cluster
    • ipc.client.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcserverinflight","title":"ipc.server.inflight","text":"

    This is a distribution summary that shows the number of inbound IPC messages currently being processed in a server.

    Unit: inflight message count

    Dimensions:

    • ipc.protocol
    • ipc.endpoint
    • ipc.client.app
    • ipc.client.cluster
    • ipc.client.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#client-metrics","title":"Client Metrics","text":""},{"location":"spectator/specs/ipc/#ipcclientcall","title":"ipc.client.call","text":"

    This is a percentile timer that is recorded for each outbound message from a client.

    Unit: seconds

    Dimensions:

    • ipc.protocol
    • ipc.result
    • ipc.vip
    • ipc.endpoint
    • ipc.status
    • ipc.status.detail
    • ipc.failure.injected
    • ipc.attempt
    • ipc.attempt.final
    • ipc.server.app
    • ipc.server.cluster
    • ipc.server.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcclientcallsizeinbound","title":"ipc.client.call.size.inbound","text":"

    This is a distribution summary of the size in bytes of inbound messages received by a client.

    Unit: bytes

    Dimensions:

    • ipc.protocol
    • ipc.vip
    • ipc.endpoint
    • ipc.result
    • ipc.status
    • ipc.status.detail
    • ipc.server.app
    • ipc.server.cluster
    • ipc.server.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcclientcallsizeoutbound","title":"ipc.client.call.size.outbound","text":"

    This is a distribution summary of the size in bytes of outbound messages sent from a client.

    Unit: bytes

    Dimensions:

    • ipc.protocol
    • ipc.vip
    • ipc.endpoint
    • ipc.result
    • ipc.status
    • ipc.status.detail
    • ipc.server.app
    • ipc.server.cluster
    • ipc.server.asg
    • owner
    • id
    "},{"location":"spectator/specs/ipc/#ipcclientinflight","title":"ipc.client.inflight","text":"

    This is a distribution summary that shows the number of currently outstanding outbound IPC messages from a client.

    Unit: inflight message count

    Dimensions:

    • ipc.protocol
    • ipc.vip
    • ipc.endpoint
    • ipc.server.app
    • ipc.server.cluster
    • ipc.server.asg
    • owner
    • id
    "}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml index 60f4e869..1397e4a9 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,1237 +2,1242 @@ https://netflix.github.io/atlas-docs/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/getting-started/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/overview/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/fetch/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/tags/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/time-parameters/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/anonymization/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/axis-bounds/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/axis-scale/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/basics/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/color-palettes/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/examples/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/graph/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/heatmap/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/layout/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/legends/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/line-attributes/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/line-styles/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/multi-y/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/outputs/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/tick/ - 2024-07-18 + 2024-08-03 daily 
https://netflix.github.io/atlas-docs/api/graph/time-shift/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/time-zone/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/api/graph/vision/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/alerting-expressions/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/alerting-philosophy/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/des/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/tutorial/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/-rot/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/2over/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/abs/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/add/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/all/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/alpha/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/and/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/area/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/as/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/avg/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/axis/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/bottomk-others-avg/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/bottomk-others-max/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/bottomk-others-min/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/bottomk-others-sum/ - 2024-07-18 + 2024-08-03 daily 
https://netflix.github.io/atlas-docs/asl/ref/bottomk/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/by/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/call/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/cf-avg/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/cf-max/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/cf-min/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/cf-sum/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/cg/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/clamp-max/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/clamp-min/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/clear/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/color/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/const/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/contains/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/count/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/cq/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/decode/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/delay/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/depth/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/derivative/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/des-epic-signal/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/des-epic-viz/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/des-fast/ - 2024-07-18 + 2024-08-03 daily 
https://netflix.github.io/atlas-docs/asl/ref/des-simple/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/des-slow/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/des-slower/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/des/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/dist-avg/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/dist-max/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/dist-stddev/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/div/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/drop/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/dup/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/each/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/ends/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/eq/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/eureka-avg/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/fadd/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/false/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/fcall/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/fdiv/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/filter/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/fmul/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/format/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/freeze/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/fsub/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/ge/ - 
2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/get/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/gt/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/has/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/head/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/heatmap/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/in/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/integral/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/le/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/legend/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/limit/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/line/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/list/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/ls/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/lt/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/lw/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/map/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/max/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/median/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/min/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/mul/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/named-rewrite/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/ndrop/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/neg/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/nip/ - 
2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/nlist/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/node-avg/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/not/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/offset/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/or/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/order/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/over/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/palette/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/pct/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/per-step/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/percentiles-heatmap/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/percentiles/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/pick/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/pow/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/random/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/re/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/reic/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/roll/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/rolling-count/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/rolling-max/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/rolling-mean/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/rolling-min/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/rolling-sum/ - 2024-07-18 + 
2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/rot/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/s/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/sdes-fast/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/sdes-simple/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/sdes-slow/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/sdes-slower/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/sdes/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/set/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/sort/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/sqrt/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/srandom/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/sset/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stack/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/starts/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stat-avg-mf/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stat-avg/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stat-count/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stat-last/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stat-max-mf/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stat-max/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stat-min-mf/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stat-min/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stat-total/ - 2024-07-18 + 2024-08-03 
daily https://netflix.github.io/atlas-docs/asl/ref/stat/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/stddev/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/sub/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/sum/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/swap/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/time-span/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/time/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/topk-others-avg/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/topk-others-max/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/topk-others-min/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/topk-others-sum/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/topk/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/trend/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/true/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/tuck/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/asl/ref/vspan/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/concepts/consolidation/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/concepts/naming/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/concepts/normalization/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/concepts/time-series/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/agent/metrics/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/agent/usage/ - 2024-07-18 + 
2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/core/clock/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/core/meters/counter/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/core/meters/dist-summary/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/core/meters/gauge/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/core/meters/timer/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/overview/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/cpp/usage/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/go/migrations/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/go/usage/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/servo-migration/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/testing/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/usage/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/jvm-buffer-pools/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/jvm-classloading/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/jvm-compilation/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/jvm-gc-causes/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/jvm-gc/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/jvm-memory-pools/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/jvm-safepoint/ - 2024-07-18 + 2024-08-03 daily 
https://netflix.github.io/atlas-docs/spectator/lang/java/ext/jvm-threads/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/log4j1/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/log4j2/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/placeholders/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/ext/thread-pools/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/meters/counter/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/meters/dist-summary/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/meters/gauge/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/meters/percentile-timer/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/meters/timer/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/registry/metrics3/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/java/registry/overview/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/usage/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/ext/nodejs-cpu/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/ext/nodejs-eventloop/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/ext/nodejs-filedescriptor/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/ext/nodejs-gc/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/ext/nodejs-heap/ - 2024-07-18 + 2024-08-03 daily 
https://netflix.github.io/atlas-docs/spectator/lang/nodejs/ext/nodejs-heapspace/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/ext/nodejs-memory/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/meters/counter/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/meters/dist-summary/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/meters/gauge/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/meters/percentile-timer/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/nodejs/meters/timer/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/py/migrations/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/py/usage/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/py/meters/age-gauge/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/py/meters/counter/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/py/meters/dist-summary/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/py/meters/gauge/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/py/meters/max-gauge/ - 2024-07-18 + 2024-08-03 daily - https://netflix.github.io/atlas-docs/spectator/lang/py/meters/mono-counter/ - 2024-07-18 + https://netflix.github.io/atlas-docs/spectator/lang/py/meters/monotonic-counter-uint/ + 2024-08-03 daily - https://netflix.github.io/atlas-docs/spectator/lang/py/meters/pct-dist-summary/ - 2024-07-18 + https://netflix.github.io/atlas-docs/spectator/lang/py/meters/monotonic-counter/ + 2024-08-03 daily - https://netflix.github.io/atlas-docs/spectator/lang/py/meters/pct-timer/ - 2024-07-18 + 
https://netflix.github.io/atlas-docs/spectator/lang/py/meters/percentile-dist-summary/ + 2024-08-03 + daily + + + https://netflix.github.io/atlas-docs/spectator/lang/py/meters/percentile-timer/ + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/py/meters/timer/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/lang/rb/deprecated/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/patterns/cardinality-limiter/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/patterns/gauge-poller/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/patterns/interval-counter/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/patterns/long-task-timer/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/patterns/percentile-timer/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/patterns/polled-meter/ - 2024-07-18 + 2024-08-03 daily https://netflix.github.io/atlas-docs/spectator/specs/ipc/ - 2024-07-18 + 2024-08-03 daily \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index bef3a99d40ad6fb558dd1b7e7c284ffedeed7912..443e4a53a77a9d13eb91ced5eaa9781b56fd814c 100644 GIT binary patch delta 1467 zcmV;s1w{J849*OHABzYG0CTNo0{?SqbY*Q}a4vXlYyjPz&r;(!5Qp#c6cl%3hMB|c z7Q@{333eYqk=wRFmNaTPAw2!cP6#mUpQ=4<FZA#EmQ37Zmw@WH`ia9>reHP<9n|zv!e-v^jW^&b;kMqZTsq-15$fp`pZ&I2R7C3_^0l-UVq)DcNmil9yPywd_fbVw!z_am^mXTl-y_3#WaHvkxHH8X@cG z`ocnfih&ccZJh$*c=Z6n8b;;`sgT)x{0O4>7j{bl&lS> zjeh_SpUfi^+yy4M&JzoqLc52P3j!S_124yFy%x4DK8-P5T`J z+KG*wU|_x3W0Oq+8UX^6XaZ&d36swP8Gk#{VwON%C^BSB_861XJQO=pVBO>jhBaVp zz+u)5JY~f4m=>ihXK)5pPNsd5&KXJwz}c_G$pHjYO`yrL%dSv{s{mTV2tvsT2Zw1> zP`naiC!mFafGMwyeXp2|5N1Pc%pE4v4yvy{KER7YYbpIINp7@A;^0~d#d*<&m47CD zMNT0GJV@vn6|V3vZqJ!Yr6GS${v^x~EwNy>sV=}6R8T!Ir~ZSJQ_>w~ zx$JN-M8c#*7hVc#Ou$FgzNY5H3vZ|~gYau=j(1zADX5uKF;eo3@oY0DF|q}WZ8iog 
zsjM$Fde;%o13f_pQyuPrp$sL6Z9cSlkCoZQ9_g0I#E0OU8FCZW`B@S+DS@2 z|I=1~q@LBBeNp@ivyE9=TR-@~LOvcUz2;LTBiRp+DVwLsO~RJ3{RO$L#ngvIb^SYk4Tky%ML)NAhJlw&}N?R6^vri&!9n5jltF$-0SZWa?D}9u2L`Tb|74S7CsS&J*Kp37+__-9EBj_W zF$C29ClTui(WK}-k$yR?CvT0%`P7pFAH&cTqYp6ea}t2{pDzj)w6JutwMLsc$P^DP%JOnF@2uW7YfEwy#~bwBxwtz>mE ze7XF1b$xju!N$&o;mhS8zyH>JzWjRk;dahmrCijDgMPVx97{>Rw(V}WyHZzVGD)vu z9$K|y{YTrUH{0v0UoP)HT-2ZC2FF+H&#jO4Zn=xr+XN}Scl)csm2uu&x!9WGjW%7h z+TOOyrLR9}v`n$PyScvk)Lj4EeEeK5d3x{FWp*@SkUq<|yUsX&xNTp(b3|%SO#hi~ zkWy+(HCjx6F4CPjZLHt(*2(V7F->x9FU5`k&SC~7V7f_~WU`s0)V>ZN9r*+6w~Z~i)#)T-rDcdTsZApoP7Yn)(BZg z*B2J@V+@>#ZR->e$Eybr)-W=!sR!AoEV{I-lCab0eAJl61Aw{{Lfe+P#Ow^gdb9Lx zlVgFP-Yl6j2&>sBg|zD^q~2pl(`YEH0rUN5!89tS_iqOZ>E2NqbrVa8(?b)>C|Mg$ z8-E86KAAlf+yy4M&JzoqLVJLd3j!S_BQM8ly%x4DK8-P5TW3 z+KG*wU|_vDV3SP(8UX~8XaZ&d5tGjX8Gn1C#VmokP-MuM>@g;%c`SCMz`Ds53~RvH zfWxdAc*=<7F)d12&fpBHoJ{*9oimgWfU{qVlLH8*nn06fmtCO@R{^w!5rmQx4i3|% zpm-(3PCyF-0aIQZ`(7~_Am;ts`CcPOm9Lvi&T$|>;R3$tL(WPkmL>jJHTF%$I_x5R?krn&%QP(k&;oca$=PDyu| z<+9_!5DAkKU3e*|F##V{`qPDBcagquntwq;X(uW9 z{7+l`o_bbu_C@h8%r<6eZT;W_3;F3#={27!8OeTpOxZk5CLfRj1~bzv{fT_>-5jIw z_2%n#^{tZ~i`u3HbBHVU5ZlQOd5uRDhYoD;d$Tod4W_oSIW;^`K@lzIA9w#1In`!l zU)uZa)NJOym)xWny@F-fdViJrXga19on*u*IZy4y&ngDnSTn0AS_2qX!=+$1kIGZ6M#&Yyi>u- znY_b)DdcT{Ih5B*sQ6ii+7W6Oc*>6dMApDeYAp|CxmV(p`H6fP)qjpJ8=Gdj1ld-P zp^!Qbit zM&WM-u;cKzT7Ux50lPj}(Sd<$Iy_rTMQ?!J_eB757q$)yFURp KD*X1atN;M5g~yNp diff --git a/spectator/agent/metrics/index.html b/spectator/agent/metrics/index.html index d96fe700..fd424c61 100644 --- a/spectator/agent/metrics/index.html +++ b/spectator/agent/metrics/index.html @@ -6670,7 +6670,7 @@ - Counters + Counter @@ -6691,7 +6691,7 @@ - Distribution Summaries + Distribution Summary @@ -6712,7 +6712,7 @@ - Gauges + Gauge @@ -6733,7 +6733,7 @@ - Percentile Timers + Percentile Timer @@ -6754,7 +6754,7 @@ - Timers + Timer @@ -6888,7 +6888,7 @@ - Age Gauges + Age Gauge @@ -6909,7 +6909,7 @@ - Counters + Counter @@ -6930,7 +6930,7 @@ - Distribution Summaries + Distribution 
Summary @@ -6972,7 +6972,7 @@ - Max Gauges + Max Gauge @@ -6989,11 +6989,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7010,11 +7010,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7031,11 +7031,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7056,7 +7077,7 @@ - Timers + Timer diff --git a/spectator/agent/usage/index.html b/spectator/agent/usage/index.html index 3a278e12..1bf28869 100644 --- a/spectator/agent/usage/index.html +++ b/spectator/agent/usage/index.html @@ -6718,7 +6718,7 @@ - Counters + Counter @@ -6739,7 +6739,7 @@ - Distribution Summaries + Distribution Summary @@ -6760,7 +6760,7 @@ - Gauges + Gauge @@ -6781,7 +6781,7 @@ - Percentile Timers + Percentile Timer @@ -6802,7 +6802,7 @@ - Timers + Timer @@ -6936,7 +6936,7 @@ - Age Gauges + Age Gauge @@ -6957,7 +6957,7 @@ - Counters + Counter @@ -6978,7 +6978,7 @@ - Distribution Summaries + Distribution Summary @@ -7020,7 +7020,7 @@ - Max Gauges + Max Gauge @@ -7037,11 +7037,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7058,11 +7058,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7079,11 +7079,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7104,7 +7125,7 @@ - Timers + Timer diff --git a/spectator/core/clock/index.html b/spectator/core/clock/index.html index 2ae6b0b9..b2f00d96 100644 --- a/spectator/core/clock/index.html +++ b/spectator/core/clock/index.html @@ -6625,7 +6625,7 @@ - Counters + Counter @@ -6646,7 +6646,7 @@ - Distribution Summaries + Distribution Summary @@ -6667,7 +6667,7 @@ - Gauges + Gauge @@ -6688,7 +6688,7 @@ - Percentile Timers + Percentile Timer @@ -6709,7 +6709,7 @@ - Timers + Timer @@ -6843,7 +6843,7 @@ - Age Gauges + Age Gauge @@ -6864,7 +6864,7 @@ - Counters + Counter @@ -6885,7 +6885,7 @@ - Distribution Summaries + Distribution Summary @@ -6927,7 +6927,7 @@ - Max Gauges + Max Gauge @@ -6944,11 +6944,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6965,11 +6965,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6986,11 +6986,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7011,7 +7032,7 @@ - Timers + Timer diff --git a/spectator/core/meters/counter/index.html b/spectator/core/meters/counter/index.html index bbe0dd37..f230798f 100644 --- a/spectator/core/meters/counter/index.html +++ b/spectator/core/meters/counter/index.html @@ -6633,7 +6633,7 @@ - Counters + Counter @@ -6654,7 +6654,7 @@ - Distribution Summaries + Distribution Summary @@ -6675,7 +6675,7 @@ - Gauges + Gauge @@ -6696,7 +6696,7 @@ - Percentile Timers + Percentile Timer @@ -6717,7 +6717,7 @@ - Timers + Timer @@ -6851,7 +6851,7 @@ - Age Gauges + Age Gauge @@ -6872,7 +6872,7 @@ - Counters + Counter @@ -6893,7 +6893,7 @@ - Distribution Summaries + Distribution Summary @@ -6935,7 +6935,7 @@ - Max Gauges + Max Gauge @@ -6952,11 +6952,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6973,11 +6973,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6994,11 +6994,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7019,7 +7040,7 @@ - Timers + Timer diff --git a/spectator/core/meters/dist-summary/index.html b/spectator/core/meters/dist-summary/index.html index 37e387d4..b743e3d1 100644 --- a/spectator/core/meters/dist-summary/index.html +++ b/spectator/core/meters/dist-summary/index.html @@ -6684,7 +6684,7 @@ - Counters + Counter @@ -6705,7 +6705,7 @@ - Distribution Summaries + Distribution Summary @@ -6726,7 +6726,7 @@ - Gauges + Gauge @@ -6747,7 +6747,7 @@ - Percentile Timers + Percentile Timer @@ -6768,7 +6768,7 @@ - Timers + Timer @@ -6902,7 +6902,7 @@ - Age Gauges + Age Gauge @@ -6923,7 +6923,7 @@ - Counters + Counter @@ -6944,7 +6944,7 @@ - Distribution Summaries + Distribution Summary @@ -6986,7 +6986,7 @@ - Max Gauges + Max Gauge @@ -7003,11 +7003,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7024,11 +7024,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7045,11 +7045,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7070,7 +7091,7 @@ - Timers + Timer diff --git a/spectator/core/meters/gauge/index.html b/spectator/core/meters/gauge/index.html index c0c6628f..817c2859 100644 --- a/spectator/core/meters/gauge/index.html +++ b/spectator/core/meters/gauge/index.html @@ -6633,7 +6633,7 @@ - Counters + Counter @@ -6654,7 +6654,7 @@ - Distribution Summaries + Distribution Summary @@ -6675,7 +6675,7 @@ - Gauges + Gauge @@ -6696,7 +6696,7 @@ - Percentile Timers + Percentile Timer @@ -6717,7 +6717,7 @@ - Timers + Timer @@ -6851,7 +6851,7 @@ - Age Gauges + Age Gauge @@ -6872,7 +6872,7 @@ - Counters + Counter @@ -6893,7 +6893,7 @@ - Distribution Summaries + Distribution Summary @@ -6935,7 +6935,7 @@ - Max Gauges + Max Gauge @@ -6952,11 +6952,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6973,11 +6973,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6994,11 +6994,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7019,7 +7040,7 @@ - Timers + Timer diff --git a/spectator/core/meters/timer/index.html b/spectator/core/meters/timer/index.html index d90b1858..7244c53b 100644 --- a/spectator/core/meters/timer/index.html +++ b/spectator/core/meters/timer/index.html @@ -6684,7 +6684,7 @@ - Counters + Counter @@ -6705,7 +6705,7 @@ - Distribution Summaries + Distribution Summary @@ -6726,7 +6726,7 @@ - Gauges + Gauge @@ -6747,7 +6747,7 @@ - Percentile Timers + Percentile Timer @@ -6768,7 +6768,7 @@ - Timers + Timer @@ -6902,7 +6902,7 @@ - Age Gauges + Age Gauge @@ -6923,7 +6923,7 @@ - Counters + Counter @@ -6944,7 +6944,7 @@ - Distribution Summaries + Distribution Summary @@ -6986,7 +6986,7 @@ - Max Gauges + Max Gauge @@ -7003,11 +7003,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7024,11 +7024,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7045,11 +7045,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7070,7 +7091,7 @@ - Timers + Timer diff --git a/spectator/index.html b/spectator/index.html index 3cfeef35..37355273 100644 --- a/spectator/index.html +++ b/spectator/index.html @@ -6559,7 +6559,7 @@ - Counters + Counter @@ -6580,7 +6580,7 @@ - Distribution Summaries + Distribution Summary @@ -6601,7 +6601,7 @@ - Gauges + Gauge @@ -6622,7 +6622,7 @@ - Percentile Timers + Percentile Timer @@ -6643,7 +6643,7 @@ - Timers + Timer @@ -6777,7 +6777,7 @@ - Age Gauges + Age Gauge @@ -6798,7 +6798,7 @@ - Counters + Counter @@ -6819,7 +6819,7 @@ - Distribution Summaries + Distribution Summary @@ -6861,7 +6861,7 @@ - Max Gauges + Max Gauge @@ -6878,11 +6878,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6899,11 +6899,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6920,11 +6920,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6945,7 +6966,7 @@ - Timers + Timer diff --git a/spectator/lang/cpp/usage/index.html b/spectator/lang/cpp/usage/index.html index 1a7eb231..e893f558 100644 --- a/spectator/lang/cpp/usage/index.html +++ b/spectator/lang/cpp/usage/index.html @@ -6618,7 +6618,7 @@ - Counters + Counter @@ -6639,7 +6639,7 @@ - Distribution Summaries + Distribution Summary @@ -6660,7 +6660,7 @@ - Gauges + Gauge @@ -6681,7 +6681,7 @@ - Percentile Timers + Percentile Timer @@ -6702,7 +6702,7 @@ - Timers + Timer @@ -6836,7 +6836,7 @@ - Age Gauges + Age Gauge @@ -6857,7 +6857,7 @@ - Counters + Counter @@ -6878,7 +6878,7 @@ - Distribution Summaries + Distribution Summary @@ -6920,7 +6920,7 @@ - Max Gauges + Max Gauge @@ -6937,11 +6937,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6958,11 +6958,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6979,11 +6979,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7004,7 +7025,7 @@ - Timers + Timer diff --git a/spectator/lang/go/migrations/index.html b/spectator/lang/go/migrations/index.html index fa486552..b128b9a0 100644 --- a/spectator/lang/go/migrations/index.html +++ b/spectator/lang/go/migrations/index.html @@ -6698,7 +6698,7 @@ - Counters + Counter @@ -6719,7 +6719,7 @@ - Distribution Summaries + Distribution Summary @@ -6740,7 +6740,7 @@ - Gauges + Gauge @@ -6761,7 +6761,7 @@ - Percentile Timers + Percentile Timer @@ -6782,7 +6782,7 @@ - Timers + Timer @@ -6916,7 +6916,7 @@ - Age Gauges + Age Gauge @@ -6937,7 +6937,7 @@ - Counters + Counter @@ -6958,7 +6958,7 @@ - Distribution Summaries + Distribution Summary @@ -7000,7 +7000,7 @@ - Max Gauges + Max Gauge @@ -7017,11 +7017,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7038,11 +7038,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7059,11 +7059,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7084,7 +7105,7 @@ - Timers + Timer diff --git a/spectator/lang/go/usage/index.html b/spectator/lang/go/usage/index.html index a9728d5f..0851a4f4 100644 --- a/spectator/lang/go/usage/index.html +++ b/spectator/lang/go/usage/index.html @@ -6636,7 +6636,7 @@ - Counters + Counter @@ -6657,7 +6657,7 @@ - Distribution Summaries + Distribution Summary @@ -6678,7 +6678,7 @@ - Gauges + Gauge @@ -6699,7 +6699,7 @@ - Percentile Timers + Percentile Timer @@ -6720,7 +6720,7 @@ - Timers + Timer @@ -6854,7 +6854,7 @@ - Age Gauges + Age Gauge @@ -6875,7 +6875,7 @@ - Counters + Counter @@ -6896,7 +6896,7 @@ - Distribution Summaries + Distribution Summary @@ -6938,7 +6938,7 @@ - Max Gauges + Max Gauge @@ -6955,11 +6955,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6976,11 +6976,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6997,11 +6997,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7022,7 +7043,7 @@ - Timers + Timer @@ -7555,13 +7576,14 @@

    Instrumenting Code}

  • Logging

    -

    Logging is implemented with the standard Golang slog package. The logger defines interfaces -for Debugf, Infof, and Errorf. There are useful messages implemented at the Debug level which can -help diagnose the metric publishing workflow. The logger can be overridden by providing one as the -third parameter of the Config constructor.

    +

    Logging is implemented with the standard Golang slog package. The +logger defines interfaces for Debugf, Infof, and Errorf. There are useful messages implemented at +the Debug level which can help diagnose the metric publishing workflow. The logger can be overridden +by providing one as the third parameter of the Config constructor.

    Runtime Metrics

    -

    Use spectator-go-runtime-metrics. Follow instructions -in the README to enable collection.

    +

    Use spectator-go-runtime-metrics. Follow +instructions in the README to enable +collection.

    diff --git a/spectator/lang/java/ext/jvm-buffer-pools/index.html b/spectator/lang/java/ext/jvm-buffer-pools/index.html index 3318035e..c5d1e790 100644 --- a/spectator/lang/java/ext/jvm-buffer-pools/index.html +++ b/spectator/lang/java/ext/jvm-buffer-pools/index.html @@ -6646,7 +6646,7 @@ - Counters + Counter @@ -6667,7 +6667,7 @@ - Distribution Summaries + Distribution Summary @@ -6688,7 +6688,7 @@ - Gauges + Gauge @@ -6709,7 +6709,7 @@ - Percentile Timers + Percentile Timer @@ -6730,7 +6730,7 @@ - Timers + Timer @@ -6864,7 +6864,7 @@ - Age Gauges + Age Gauge @@ -6885,7 +6885,7 @@ - Counters + Counter @@ -6906,7 +6906,7 @@ - Distribution Summaries + Distribution Summary @@ -6948,7 +6948,7 @@ - Max Gauges + Max Gauge @@ -6965,11 +6965,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6986,11 +6986,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7007,11 +7007,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7032,7 +7053,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/jvm-classloading/index.html b/spectator/lang/java/ext/jvm-classloading/index.html index ac638177..fce6f128 100644 --- a/spectator/lang/java/ext/jvm-classloading/index.html +++ b/spectator/lang/java/ext/jvm-classloading/index.html @@ -6646,7 +6646,7 @@ - Counters + Counter @@ -6667,7 +6667,7 @@ - Distribution Summaries + Distribution Summary @@ -6688,7 +6688,7 @@ - Gauges + Gauge @@ -6709,7 +6709,7 @@ - Percentile Timers + Percentile Timer @@ -6730,7 +6730,7 @@ - Timers + Timer @@ -6864,7 +6864,7 @@ - Age Gauges + Age Gauge @@ -6885,7 +6885,7 @@ - Counters + Counter @@ -6906,7 +6906,7 @@ - Distribution Summaries + Distribution Summary @@ -6948,7 +6948,7 @@ - Max Gauges + Max Gauge @@ -6965,11 +6965,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6986,11 +6986,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7007,11 +7007,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7032,7 +7053,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/jvm-compilation/index.html b/spectator/lang/java/ext/jvm-compilation/index.html index fbab0f17..981f4729 100644 --- a/spectator/lang/java/ext/jvm-compilation/index.html +++ b/spectator/lang/java/ext/jvm-compilation/index.html @@ -6637,7 +6637,7 @@ - Counters + Counter @@ -6658,7 +6658,7 @@ - Distribution Summaries + Distribution Summary @@ -6679,7 +6679,7 @@ - Gauges + Gauge @@ -6700,7 +6700,7 @@ - Percentile Timers + Percentile Timer @@ -6721,7 +6721,7 @@ - Timers + Timer @@ -6855,7 +6855,7 @@ - Age Gauges + Age Gauge @@ -6876,7 +6876,7 @@ - Counters + Counter @@ -6897,7 +6897,7 @@ - Distribution Summaries + Distribution Summary @@ -6939,7 +6939,7 @@ - Max Gauges + Max Gauge @@ -6956,11 +6956,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6977,11 +6977,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6998,11 +6998,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7023,7 +7044,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/jvm-gc-causes/index.html b/spectator/lang/java/ext/jvm-gc-causes/index.html index 9a2dd696..71f05ef2 100644 --- a/spectator/lang/java/ext/jvm-gc-causes/index.html +++ b/spectator/lang/java/ext/jvm-gc-causes/index.html @@ -6838,7 +6838,7 @@ - Counters + Counter @@ -6859,7 +6859,7 @@ - Distribution Summaries + Distribution Summary @@ -6880,7 +6880,7 @@ - Gauges + Gauge @@ -6901,7 +6901,7 @@ - Percentile Timers + Percentile Timer @@ -6922,7 +6922,7 @@ - Timers + Timer @@ -7056,7 +7056,7 @@ - Age Gauges + Age Gauge @@ -7077,7 +7077,7 @@ - Counters + Counter @@ -7098,7 +7098,7 @@ - Distribution Summaries + Distribution Summary @@ -7140,7 +7140,7 @@ - Max Gauges + Max Gauge @@ -7157,11 +7157,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7178,11 +7178,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7199,11 +7199,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7224,7 +7245,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/jvm-gc/index.html b/spectator/lang/java/ext/jvm-gc/index.html index 3012d343..7381cae1 100644 --- a/spectator/lang/java/ext/jvm-gc/index.html +++ b/spectator/lang/java/ext/jvm-gc/index.html @@ -6757,7 +6757,7 @@ - Counters + Counter @@ -6778,7 +6778,7 @@ - Distribution Summaries + Distribution Summary @@ -6799,7 +6799,7 @@ - Gauges + Gauge @@ -6820,7 +6820,7 @@ - Percentile Timers + Percentile Timer @@ -6841,7 +6841,7 @@ - Timers + Timer @@ -6975,7 +6975,7 @@ - Age Gauges + Age Gauge @@ -6996,7 +6996,7 @@ - Counters + Counter @@ -7017,7 +7017,7 @@ - Distribution Summaries + Distribution Summary @@ -7059,7 +7059,7 @@ - Max Gauges + Max Gauge @@ -7076,11 +7076,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7097,11 +7097,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7118,11 +7118,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7143,7 +7164,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/jvm-memory-pools/index.html b/spectator/lang/java/ext/jvm-memory-pools/index.html index ab01c68f..3fe813a5 100644 --- a/spectator/lang/java/ext/jvm-memory-pools/index.html +++ b/spectator/lang/java/ext/jvm-memory-pools/index.html @@ -6664,7 +6664,7 @@ - Counters + Counter @@ -6685,7 +6685,7 @@ - Distribution Summaries + Distribution Summary @@ -6706,7 +6706,7 @@ - Gauges + Gauge @@ -6727,7 +6727,7 @@ - Percentile Timers + Percentile Timer @@ -6748,7 +6748,7 @@ - Timers + Timer @@ -6882,7 +6882,7 @@ - Age Gauges + Age Gauge @@ -6903,7 +6903,7 @@ - Counters + Counter @@ -6924,7 +6924,7 @@ - Distribution Summaries + Distribution Summary @@ -6966,7 +6966,7 @@ - Max Gauges + Max Gauge @@ -6983,11 +6983,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7004,11 +7004,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7025,11 +7025,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7050,7 +7071,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/jvm-safepoint/index.html b/spectator/lang/java/ext/jvm-safepoint/index.html index d7bfaeee..b05f7389 100644 --- a/spectator/lang/java/ext/jvm-safepoint/index.html +++ b/spectator/lang/java/ext/jvm-safepoint/index.html @@ -6646,7 +6646,7 @@ - Counters + Counter @@ -6667,7 +6667,7 @@ - Distribution Summaries + Distribution Summary @@ -6688,7 +6688,7 @@ - Gauges + Gauge @@ -6709,7 +6709,7 @@ - Percentile Timers + Percentile Timer @@ -6730,7 +6730,7 @@ - Timers + Timer @@ -6864,7 +6864,7 @@ - Age Gauges + Age Gauge @@ -6885,7 +6885,7 @@ - Counters + Counter @@ -6906,7 +6906,7 @@ - Distribution Summaries + Distribution Summary @@ -6948,7 +6948,7 @@ - Max Gauges + Max Gauge @@ -6965,11 +6965,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6986,11 +6986,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7007,11 +7007,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7032,7 +7053,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/jvm-threads/index.html b/spectator/lang/java/ext/jvm-threads/index.html index 1a52fa5f..de693654 100644 --- a/spectator/lang/java/ext/jvm-threads/index.html +++ b/spectator/lang/java/ext/jvm-threads/index.html @@ -6646,7 +6646,7 @@ - Counters + Counter @@ -6667,7 +6667,7 @@ - Distribution Summaries + Distribution Summary @@ -6688,7 +6688,7 @@ - Gauges + Gauge @@ -6709,7 +6709,7 @@ - Percentile Timers + Percentile Timer @@ -6730,7 +6730,7 @@ - Timers + Timer @@ -6864,7 +6864,7 @@ - Age Gauges + Age Gauge @@ -6885,7 +6885,7 @@ - Counters + Counter @@ -6906,7 +6906,7 @@ - Distribution Summaries + Distribution Summary @@ -6948,7 +6948,7 @@ - Max Gauges + Max Gauge @@ -6965,11 +6965,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6986,11 +6986,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7007,11 +7007,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7032,7 +7053,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/log4j1/index.html b/spectator/lang/java/ext/log4j1/index.html index 53339105..d87bbb2a 100644 --- a/spectator/lang/java/ext/log4j1/index.html +++ b/spectator/lang/java/ext/log4j1/index.html @@ -6644,7 +6644,7 @@ - Counters + Counter @@ -6665,7 +6665,7 @@ - Distribution Summaries + Distribution Summary @@ -6686,7 +6686,7 @@ - Gauges + Gauge @@ -6707,7 +6707,7 @@ - Percentile Timers + Percentile Timer @@ -6728,7 +6728,7 @@ - Timers + Timer @@ -6862,7 +6862,7 @@ - Age Gauges + Age Gauge @@ -6883,7 +6883,7 @@ - Counters + Counter @@ -6904,7 +6904,7 @@ - Distribution Summaries + Distribution Summary @@ -6946,7 +6946,7 @@ - Max Gauges + Max Gauge @@ -6963,11 +6963,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6984,11 +6984,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7005,11 +7005,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7030,7 +7051,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/log4j2/index.html b/spectator/lang/java/ext/log4j2/index.html index 6608298c..5e1ab89b 100644 --- a/spectator/lang/java/ext/log4j2/index.html +++ b/spectator/lang/java/ext/log4j2/index.html @@ -6644,7 +6644,7 @@ - Counters + Counter @@ -6665,7 +6665,7 @@ - Distribution Summaries + Distribution Summary @@ -6686,7 +6686,7 @@ - Gauges + Gauge @@ -6707,7 +6707,7 @@ - Percentile Timers + Percentile Timer @@ -6728,7 +6728,7 @@ - Timers + Timer @@ -6862,7 +6862,7 @@ - Age Gauges + Age Gauge @@ -6883,7 +6883,7 @@ - Counters + Counter @@ -6904,7 +6904,7 @@ - Distribution Summaries + Distribution Summary @@ -6946,7 +6946,7 @@ - Max Gauges + Max Gauge @@ -6963,11 +6963,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6984,11 +6984,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7005,11 +7005,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7030,7 +7051,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/placeholders/index.html b/spectator/lang/java/ext/placeholders/index.html index f4bac6fc..972cfb09 100644 --- a/spectator/lang/java/ext/placeholders/index.html +++ b/spectator/lang/java/ext/placeholders/index.html @@ -6620,7 +6620,7 @@ - Counters + Counter @@ -6641,7 +6641,7 @@ - Distribution Summaries + Distribution Summary @@ -6662,7 +6662,7 @@ - Gauges + Gauge @@ -6683,7 +6683,7 @@ - Percentile Timers + Percentile Timer @@ -6704,7 +6704,7 @@ - Timers + Timer @@ -6838,7 +6838,7 @@ - Age Gauges + Age Gauge @@ -6859,7 +6859,7 @@ - Counters + Counter @@ -6880,7 +6880,7 @@ - Distribution Summaries + Distribution Summary @@ -6922,7 +6922,7 @@ - Max Gauges + Max Gauge @@ -6939,11 +6939,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6960,11 +6960,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6981,11 +6981,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7006,7 +7027,7 @@ - Timers + Timer diff --git a/spectator/lang/java/ext/thread-pools/index.html b/spectator/lang/java/ext/thread-pools/index.html index c769d95b..a7d36eab 100644 --- a/spectator/lang/java/ext/thread-pools/index.html +++ b/spectator/lang/java/ext/thread-pools/index.html @@ -6691,7 +6691,7 @@ - Counters + Counter @@ -6712,7 +6712,7 @@ - Distribution Summaries + Distribution Summary @@ -6733,7 +6733,7 @@ - Gauges + Gauge @@ -6754,7 +6754,7 @@ - Percentile Timers + Percentile Timer @@ -6775,7 +6775,7 @@ - Timers + Timer @@ -6909,7 +6909,7 @@ - Age Gauges + Age Gauge @@ -6930,7 +6930,7 @@ - Counters + Counter @@ -6951,7 +6951,7 @@ - Distribution Summaries + Distribution Summary @@ -6993,7 +6993,7 @@ - Max Gauges + Max Gauge @@ -7010,11 +7010,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7031,11 +7031,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7052,11 +7052,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7077,7 +7098,7 @@ - Timers + Timer diff --git a/spectator/lang/java/meters/counter/index.html b/spectator/lang/java/meters/counter/index.html index 88aa551c..0fc436f8 100644 --- a/spectator/lang/java/meters/counter/index.html +++ b/spectator/lang/java/meters/counter/index.html @@ -6572,7 +6572,7 @@ - Counters + Counter @@ -6593,7 +6593,7 @@ - Distribution Summaries + Distribution Summary @@ -6614,7 +6614,7 @@ - Gauges + Gauge @@ -6635,7 +6635,7 @@ - Percentile Timers + Percentile Timer @@ -6656,7 +6656,7 @@ - Timers + Timer @@ -6790,7 +6790,7 @@ - Age Gauges + Age Gauge @@ -6811,7 +6811,7 @@ - Counters + Counter @@ -6832,7 +6832,7 @@ - Distribution Summaries + Distribution Summary @@ -6874,7 +6874,7 @@ - Max Gauges + Max Gauge @@ -6891,11 +6891,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6912,11 +6912,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6933,11 +6933,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6958,7 +6979,7 @@ - Timers + Timer diff --git a/spectator/lang/java/meters/dist-summary/index.html b/spectator/lang/java/meters/dist-summary/index.html index e78b4597..774d5d03 100644 --- a/spectator/lang/java/meters/dist-summary/index.html +++ b/spectator/lang/java/meters/dist-summary/index.html @@ -6572,7 +6572,7 @@ - Counters + Counter @@ -6593,7 +6593,7 @@ - Distribution Summaries + Distribution Summary @@ -6614,7 +6614,7 @@ - Gauges + Gauge @@ -6635,7 +6635,7 @@ - Percentile Timers + Percentile Timer @@ -6656,7 +6656,7 @@ - Timers + Timer @@ -6790,7 +6790,7 @@ - Age Gauges + Age Gauge @@ -6811,7 +6811,7 @@ - Counters + Counter @@ -6832,7 +6832,7 @@ - Distribution Summaries + Distribution Summary @@ -6874,7 +6874,7 @@ - Max Gauges + Max Gauge @@ -6891,11 +6891,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6912,11 +6912,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6933,11 +6933,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6958,7 +6979,7 @@ - Timers + Timer diff --git a/spectator/lang/java/meters/gauge/index.html b/spectator/lang/java/meters/gauge/index.html index 1d1642b2..dd5f7aed 100644 --- a/spectator/lang/java/meters/gauge/index.html +++ b/spectator/lang/java/meters/gauge/index.html @@ -6662,7 +6662,7 @@ - Counters + Counter @@ -6683,7 +6683,7 @@ - Distribution Summaries + Distribution Summary @@ -6704,7 +6704,7 @@ - Gauges + Gauge @@ -6725,7 +6725,7 @@ - Percentile Timers + Percentile Timer @@ -6746,7 +6746,7 @@ - Timers + Timer @@ -6880,7 +6880,7 @@ - Age Gauges + Age Gauge @@ -6901,7 +6901,7 @@ - Counters + Counter @@ -6922,7 +6922,7 @@ - Distribution Summaries + Distribution Summary @@ -6964,7 +6964,7 @@ - Max Gauges + Max Gauge @@ -6981,11 +6981,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7002,11 +7002,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7023,11 +7023,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7048,7 +7069,7 @@ - Timers + Timer diff --git a/spectator/lang/java/meters/percentile-timer/index.html b/spectator/lang/java/meters/percentile-timer/index.html index 14ddf2ad..bc52a53c 100644 --- a/spectator/lang/java/meters/percentile-timer/index.html +++ b/spectator/lang/java/meters/percentile-timer/index.html @@ -6572,7 +6572,7 @@ - Counters + Counter @@ -6593,7 +6593,7 @@ - Distribution Summaries + Distribution Summary @@ -6614,7 +6614,7 @@ - Gauges + Gauge @@ -6635,7 +6635,7 @@ - Percentile Timers + Percentile Timer @@ -6656,7 +6656,7 @@ - Timers + Timer @@ -6790,7 +6790,7 @@ - Age Gauges + Age Gauge @@ -6811,7 +6811,7 @@ - Counters + Counter @@ -6832,7 +6832,7 @@ - Distribution Summaries + Distribution Summary @@ -6874,7 +6874,7 @@ - Max Gauges + Max Gauge @@ -6891,11 +6891,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6912,11 +6912,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6933,11 +6933,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6958,7 +6979,7 @@ - Timers + Timer diff --git a/spectator/lang/java/meters/timer/index.html b/spectator/lang/java/meters/timer/index.html index 40932426..c60b2100 100644 --- a/spectator/lang/java/meters/timer/index.html +++ b/spectator/lang/java/meters/timer/index.html @@ -6620,7 +6620,7 @@ - Counters + Counter @@ -6641,7 +6641,7 @@ - Distribution Summaries + Distribution Summary @@ -6662,7 +6662,7 @@ - Gauges + Gauge @@ -6683,7 +6683,7 @@ - Percentile Timers + Percentile Timer @@ -6704,7 +6704,7 @@ - Timers + Timer @@ -6838,7 +6838,7 @@ - Age Gauges + Age Gauge @@ -6859,7 +6859,7 @@ - Counters + Counter @@ -6880,7 +6880,7 @@ - Distribution Summaries + Distribution Summary @@ -6922,7 +6922,7 @@ - Max Gauges + Max Gauge @@ -6939,11 +6939,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6960,11 +6960,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6981,11 +6981,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7006,7 +7027,7 @@ - Timers + Timer diff --git a/spectator/lang/java/registry/metrics3/index.html b/spectator/lang/java/registry/metrics3/index.html index 84c45aa5..27ed00c1 100644 --- a/spectator/lang/java/registry/metrics3/index.html +++ b/spectator/lang/java/registry/metrics3/index.html @@ -6572,7 +6572,7 @@ - Counters + Counter @@ -6593,7 +6593,7 @@ - Distribution Summaries + Distribution Summary @@ -6614,7 +6614,7 @@ - Gauges + Gauge @@ -6635,7 +6635,7 @@ - Percentile Timers + Percentile Timer @@ -6656,7 +6656,7 @@ - Timers + Timer @@ -6790,7 +6790,7 @@ - Age Gauges + Age Gauge @@ -6811,7 +6811,7 @@ - Counters + Counter @@ -6832,7 +6832,7 @@ - Distribution Summaries + Distribution Summary @@ -6874,7 +6874,7 @@ - Max Gauges + Max Gauge @@ -6891,11 +6891,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6912,11 +6912,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6933,11 +6933,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6958,7 +6979,7 @@ - Timers + Timer diff --git a/spectator/lang/java/registry/overview/index.html b/spectator/lang/java/registry/overview/index.html index a6eb2ad4..b51b5550 100644 --- a/spectator/lang/java/registry/overview/index.html +++ b/spectator/lang/java/registry/overview/index.html @@ -6662,7 +6662,7 @@ - Counters + Counter @@ -6683,7 +6683,7 @@ - Distribution Summaries + Distribution Summary @@ -6704,7 +6704,7 @@ - Gauges + Gauge @@ -6725,7 +6725,7 @@ - Percentile Timers + Percentile Timer @@ -6746,7 +6746,7 @@ - Timers + Timer @@ -6880,7 +6880,7 @@ - Age Gauges + Age Gauge @@ -6901,7 +6901,7 @@ - Counters + Counter @@ -6922,7 +6922,7 @@ - Distribution Summaries + Distribution Summary @@ -6964,7 +6964,7 @@ - Max Gauges + Max Gauge @@ -6981,11 +6981,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7002,11 +7002,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7023,11 +7023,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7048,7 +7069,7 @@ - Timers + Timer diff --git a/spectator/lang/java/servo-migration/index.html b/spectator/lang/java/servo-migration/index.html index fa7a250a..de6e4b6e 100644 --- a/spectator/lang/java/servo-migration/index.html +++ b/spectator/lang/java/servo-migration/index.html @@ -6851,7 +6851,7 @@ - Counters + Counter @@ -6872,7 +6872,7 @@ - Distribution Summaries + Distribution Summary @@ -6893,7 +6893,7 @@ - Gauges + Gauge @@ -6914,7 +6914,7 @@ - Percentile Timers + Percentile Timer @@ -6935,7 +6935,7 @@ - Timers + Timer @@ -7069,7 +7069,7 @@ - Age Gauges + Age Gauge @@ -7090,7 +7090,7 @@ - Counters + Counter @@ -7111,7 +7111,7 @@ - Distribution Summaries + Distribution Summary @@ -7153,7 +7153,7 @@ - Max Gauges + Max Gauge @@ -7170,11 +7170,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7191,11 +7191,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7212,11 +7212,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7237,7 +7258,7 @@ - Timers + Timer diff --git a/spectator/lang/java/testing/index.html b/spectator/lang/java/testing/index.html index badf7829..3d2b68c0 100644 --- a/spectator/lang/java/testing/index.html +++ b/spectator/lang/java/testing/index.html @@ -6627,7 +6627,7 @@ - Counters + Counter @@ -6648,7 +6648,7 @@ - Distribution Summaries + Distribution Summary @@ -6669,7 +6669,7 @@ - Gauges + Gauge @@ -6690,7 +6690,7 @@ - Percentile Timers + Percentile Timer @@ -6711,7 +6711,7 @@ - Timers + Timer @@ -6845,7 +6845,7 @@ - Age Gauges + Age Gauge @@ -6866,7 +6866,7 @@ - Counters + Counter @@ -6887,7 +6887,7 @@ - Distribution Summaries + Distribution Summary @@ -6929,7 +6929,7 @@ - Max Gauges + Max Gauge @@ -6946,11 +6946,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6967,11 +6967,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6988,11 +6988,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7013,7 +7034,7 @@ - Timers + Timer diff --git a/spectator/lang/java/usage/index.html b/spectator/lang/java/usage/index.html index 4ab08adf..efaf4bc1 100644 --- a/spectator/lang/java/usage/index.html +++ b/spectator/lang/java/usage/index.html @@ -6656,7 +6656,7 @@ - Counters + Counter @@ -6677,7 +6677,7 @@ - Distribution Summaries + Distribution Summary @@ -6698,7 +6698,7 @@ - Gauges + Gauge @@ -6719,7 +6719,7 @@ - Percentile Timers + Percentile Timer @@ -6740,7 +6740,7 @@ - Timers + Timer @@ -6874,7 +6874,7 @@ - Age Gauges + Age Gauge @@ -6895,7 +6895,7 @@ - Counters + Counter @@ -6916,7 +6916,7 @@ - Distribution Summaries + Distribution Summary @@ -6958,7 +6958,7 @@ - Max Gauges + Max Gauge @@ -6975,11 +6975,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6996,11 +6996,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7017,11 +7017,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7042,7 +7063,7 @@ - Timers + Timer diff --git a/spectator/lang/nodejs/ext/nodejs-cpu/index.html b/spectator/lang/nodejs/ext/nodejs-cpu/index.html index 0778f5a5..cda54098 100644 --- a/spectator/lang/nodejs/ext/nodejs-cpu/index.html +++ b/spectator/lang/nodejs/ext/nodejs-cpu/index.html @@ -6633,7 +6633,7 @@ - Counters + Counter @@ -6654,7 +6654,7 @@ - Distribution Summaries + Distribution Summary @@ -6675,7 +6675,7 @@ - Gauges + Gauge @@ -6696,7 +6696,7 @@ - Percentile Timers + Percentile Timer @@ -6717,7 +6717,7 @@ - Timers + Timer @@ -6851,7 +6851,7 @@ - Age Gauges + Age Gauge @@ -6872,7 +6872,7 @@ - Counters + Counter @@ -6893,7 +6893,7 @@ - Distribution Summaries + Distribution Summary @@ -6935,7 +6935,7 @@ - Max Gauges + Max Gauge @@ -6952,11 +6952,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6973,11 +6973,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6994,11 +6994,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7019,7 +7040,7 @@ - Timers + Timer diff --git a/spectator/lang/nodejs/ext/nodejs-eventloop/index.html b/spectator/lang/nodejs/ext/nodejs-eventloop/index.html index 3a7dd964..030f4256 100644 --- a/spectator/lang/nodejs/ext/nodejs-eventloop/index.html +++ b/spectator/lang/nodejs/ext/nodejs-eventloop/index.html @@ -6642,7 +6642,7 @@ - Counters + Counter @@ -6663,7 +6663,7 @@ - Distribution Summaries + Distribution Summary @@ -6684,7 +6684,7 @@ - Gauges + Gauge @@ -6705,7 +6705,7 @@ - Percentile Timers + Percentile Timer @@ -6726,7 +6726,7 @@ - Timers + Timer @@ -6860,7 +6860,7 @@ - Age Gauges + Age Gauge @@ -6881,7 +6881,7 @@ - Counters + Counter @@ -6902,7 +6902,7 @@ - Distribution Summaries + Distribution Summary @@ -6944,7 +6944,7 @@ - Max Gauges + Max Gauge @@ -6961,11 +6961,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6982,11 +6982,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7003,11 +7003,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7028,7 +7049,7 @@ - Timers + Timer diff --git a/spectator/lang/nodejs/ext/nodejs-filedescriptor/index.html b/spectator/lang/nodejs/ext/nodejs-filedescriptor/index.html index 7413f4f2..5f3c6017 100644 --- a/spectator/lang/nodejs/ext/nodejs-filedescriptor/index.html +++ b/spectator/lang/nodejs/ext/nodejs-filedescriptor/index.html @@ -6642,7 +6642,7 @@ - Counters + Counter @@ -6663,7 +6663,7 @@ - Distribution Summaries + Distribution Summary @@ -6684,7 +6684,7 @@ - Gauges + Gauge @@ -6705,7 +6705,7 @@ - Percentile Timers + Percentile Timer @@ -6726,7 +6726,7 @@ - Timers + Timer @@ -6860,7 +6860,7 @@ - Age Gauges + Age Gauge @@ -6881,7 +6881,7 @@ - Counters + Counter @@ -6902,7 +6902,7 @@ - Distribution Summaries + Distribution Summary @@ -6944,7 +6944,7 @@ - Max Gauges + Max Gauge @@ -6961,11 +6961,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6982,11 +6982,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7003,11 +7003,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7028,7 +7049,7 @@ - Timers + Timer diff --git a/spectator/lang/nodejs/ext/nodejs-gc/index.html b/spectator/lang/nodejs/ext/nodejs-gc/index.html index abd1582b..133b2630 100644 --- a/spectator/lang/nodejs/ext/nodejs-gc/index.html +++ b/spectator/lang/nodejs/ext/nodejs-gc/index.html @@ -6669,7 +6669,7 @@ - Counters + Counter @@ -6690,7 +6690,7 @@ - Distribution Summaries + Distribution Summary @@ -6711,7 +6711,7 @@ - Gauges + Gauge @@ -6732,7 +6732,7 @@ - Percentile Timers + Percentile Timer @@ -6753,7 +6753,7 @@ - Timers + Timer @@ -6887,7 +6887,7 @@ - Age Gauges + Age Gauge @@ -6908,7 +6908,7 @@ - Counters + Counter @@ -6929,7 +6929,7 @@ - Distribution Summaries + Distribution Summary @@ -6971,7 +6971,7 @@ - Max Gauges + Max Gauge @@ -6988,11 +6988,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7009,11 +7009,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7030,11 +7030,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7055,7 +7076,7 @@ - Timers + Timer diff --git a/spectator/lang/nodejs/ext/nodejs-heap/index.html b/spectator/lang/nodejs/ext/nodejs-heap/index.html index 2a04ffc6..41ee39ac 100644 --- a/spectator/lang/nodejs/ext/nodejs-heap/index.html +++ b/spectator/lang/nodejs/ext/nodejs-heap/index.html @@ -6705,7 +6705,7 @@ - Counters + Counter @@ -6726,7 +6726,7 @@ - Distribution Summaries + Distribution Summary @@ -6747,7 +6747,7 @@ - Gauges + Gauge @@ -6768,7 +6768,7 @@ - Percentile Timers + Percentile Timer @@ -6789,7 +6789,7 @@ - Timers + Timer @@ -6923,7 +6923,7 @@ - Age Gauges + Age Gauge @@ -6944,7 +6944,7 @@ - Counters + Counter @@ -6965,7 +6965,7 @@ - Distribution Summaries + Distribution Summary @@ -7007,7 +7007,7 @@ - Max Gauges + Max Gauge @@ -7024,11 +7024,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7045,11 +7045,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7066,11 +7066,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7091,7 +7112,7 @@ - Timers + Timer diff --git a/spectator/lang/nodejs/ext/nodejs-heapspace/index.html b/spectator/lang/nodejs/ext/nodejs-heapspace/index.html index f01ed127..d2bfed34 100644 --- a/spectator/lang/nodejs/ext/nodejs-heapspace/index.html +++ b/spectator/lang/nodejs/ext/nodejs-heapspace/index.html @@ -6660,7 +6660,7 @@ - Counters + Counter @@ -6681,7 +6681,7 @@ - Distribution Summaries + Distribution Summary @@ -6702,7 +6702,7 @@ - Gauges + Gauge @@ -6723,7 +6723,7 @@ - Percentile Timers + Percentile Timer @@ -6744,7 +6744,7 @@ - Timers + Timer @@ -6878,7 +6878,7 @@ - Age Gauges + Age Gauge @@ -6899,7 +6899,7 @@ - Counters + Counter @@ -6920,7 +6920,7 @@ - Distribution Summaries + Distribution Summary @@ -6962,7 +6962,7 @@ - Max Gauges + Max Gauge @@ -6979,11 +6979,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7000,11 +7000,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7021,11 +7021,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7046,7 +7067,7 @@ - Timers + Timer diff --git a/spectator/lang/nodejs/ext/nodejs-memory/index.html b/spectator/lang/nodejs/ext/nodejs-memory/index.html index f5b2473e..2c03a485 100644 --- a/spectator/lang/nodejs/ext/nodejs-memory/index.html +++ b/spectator/lang/nodejs/ext/nodejs-memory/index.html @@ -6660,7 +6660,7 @@ - Counters + Counter @@ -6681,7 +6681,7 @@ - Distribution Summaries + Distribution Summary @@ -6702,7 +6702,7 @@ - Gauges + Gauge @@ -6723,7 +6723,7 @@ - Percentile Timers + Percentile Timer @@ -6744,7 +6744,7 @@ - Timers + Timer @@ -6878,7 +6878,7 @@ - Age Gauges + Age Gauge @@ -6899,7 +6899,7 @@ - Counters + Counter @@ -6920,7 +6920,7 @@ - Distribution Summaries + Distribution Summary @@ -6962,7 +6962,7 @@ - Max Gauges + Max Gauge @@ -6979,11 +6979,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7000,11 +7000,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7021,11 +7021,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7046,7 +7067,7 @@ - Timers + Timer diff --git a/spectator/lang/nodejs/meters/counter/index.html b/spectator/lang/nodejs/meters/counter/index.html index 010ac309..f5fb63ff 100644 --- a/spectator/lang/nodejs/meters/counter/index.html +++ b/spectator/lang/nodejs/meters/counter/index.html @@ -22,7 +22,7 @@ - Counters - Atlas Docs + Counter - Atlas Docs @@ -102,7 +102,7 @@
    - Counters + Counter
    @@ -6564,7 +6564,7 @@ - Counters + Counter @@ -6586,7 +6586,7 @@ - Distribution Summaries + Distribution Summary @@ -6607,7 +6607,7 @@ - Gauges + Gauge @@ -6628,7 +6628,7 @@ - Percentile Timers + Percentile Timer @@ -6649,7 +6649,7 @@ - Timers + Timer @@ -6783,7 +6783,7 @@ - Age Gauges + Age Gauge @@ -6804,7 +6804,7 @@ - Counters + Counter @@ -6825,7 +6825,7 @@ - Distribution Summaries + Distribution Summary @@ -6867,7 +6867,7 @@ - Max Gauges + Max Gauge @@ -6884,11 +6884,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,7 +7374,7 @@ -

    Counters

    +

    Counter

    TBD

    diff --git a/spectator/lang/nodejs/meters/dist-summary/index.html b/spectator/lang/nodejs/meters/dist-summary/index.html index 472c656c..e171a2ef 100644 --- a/spectator/lang/nodejs/meters/dist-summary/index.html +++ b/spectator/lang/nodejs/meters/dist-summary/index.html @@ -22,7 +22,7 @@ - Distribution Summaries - Atlas Docs + Distribution Summary - Atlas Docs @@ -102,7 +102,7 @@
    - Distribution Summaries + Distribution Summary
    @@ -6557,7 +6557,7 @@ - Counters + Counter @@ -6585,7 +6585,7 @@ - Distribution Summaries + Distribution Summary @@ -6607,7 +6607,7 @@ - Gauges + Gauge @@ -6628,7 +6628,7 @@ - Percentile Timers + Percentile Timer @@ -6649,7 +6649,7 @@ - Timers + Timer @@ -6783,7 +6783,7 @@ - Age Gauges + Age Gauge @@ -6804,7 +6804,7 @@ - Counters + Counter @@ -6825,7 +6825,7 @@ - Distribution Summaries + Distribution Summary @@ -6867,7 +6867,7 @@ - Max Gauges + Max Gauge @@ -6884,11 +6884,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,7 +7374,7 @@ -

    Distribution Summaries

    +

    Distribution Summary

    TBD

    diff --git a/spectator/lang/nodejs/meters/gauge/index.html b/spectator/lang/nodejs/meters/gauge/index.html index 00fc345c..094b2c94 100644 --- a/spectator/lang/nodejs/meters/gauge/index.html +++ b/spectator/lang/nodejs/meters/gauge/index.html @@ -22,7 +22,7 @@ - Gauges - Atlas Docs + Gauge - Atlas Docs @@ -102,7 +102,7 @@
    - Gauges + Gauge
    @@ -6557,7 +6557,7 @@ - Counters + Counter @@ -6578,7 +6578,7 @@ - Distribution Summaries + Distribution Summary @@ -6606,7 +6606,7 @@ - Gauges + Gauge @@ -6628,7 +6628,7 @@ - Percentile Timers + Percentile Timer @@ -6649,7 +6649,7 @@ - Timers + Timer @@ -6783,7 +6783,7 @@ - Age Gauges + Age Gauge @@ -6804,7 +6804,7 @@ - Counters + Counter @@ -6825,7 +6825,7 @@ - Distribution Summaries + Distribution Summary @@ -6867,7 +6867,7 @@ - Max Gauges + Max Gauge @@ -6884,11 +6884,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,7 +7374,7 @@ -

    Gauges

    +

    Gauge

    TBD

    diff --git a/spectator/lang/nodejs/meters/percentile-timer/index.html b/spectator/lang/nodejs/meters/percentile-timer/index.html index a17e6cab..aac8ce0a 100644 --- a/spectator/lang/nodejs/meters/percentile-timer/index.html +++ b/spectator/lang/nodejs/meters/percentile-timer/index.html @@ -22,7 +22,7 @@ - Percentile Timers - Atlas Docs + Percentile Timer - Atlas Docs @@ -102,7 +102,7 @@
    - Percentile Timers + Percentile Timer
    @@ -6557,7 +6557,7 @@ - Counters + Counter @@ -6578,7 +6578,7 @@ - Distribution Summaries + Distribution Summary @@ -6599,7 +6599,7 @@ - Gauges + Gauge @@ -6627,7 +6627,7 @@ - Percentile Timers + Percentile Timer @@ -6649,7 +6649,7 @@ - Timers + Timer @@ -6783,7 +6783,7 @@ - Age Gauges + Age Gauge @@ -6804,7 +6804,7 @@ - Counters + Counter @@ -6825,7 +6825,7 @@ - Distribution Summaries + Distribution Summary @@ -6867,7 +6867,7 @@ - Max Gauges + Max Gauge @@ -6884,11 +6884,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,7 +7374,7 @@ -

    Percentile Timers

    +

    Percentile Timer

    TBD

    diff --git a/spectator/lang/nodejs/meters/timer/index.html b/spectator/lang/nodejs/meters/timer/index.html index 3de19dab..e268889e 100644 --- a/spectator/lang/nodejs/meters/timer/index.html +++ b/spectator/lang/nodejs/meters/timer/index.html @@ -22,7 +22,7 @@ - Timers - Atlas Docs + Timer - Atlas Docs @@ -102,7 +102,7 @@
    - Timers + Timer
    @@ -6557,7 +6557,7 @@ - Counters + Counter @@ -6578,7 +6578,7 @@ - Distribution Summaries + Distribution Summary @@ -6599,7 +6599,7 @@ - Gauges + Gauge @@ -6620,7 +6620,7 @@ - Percentile Timers + Percentile Timer @@ -6648,7 +6648,7 @@ - Timers + Timer @@ -6783,7 +6783,7 @@ - Age Gauges + Age Gauge @@ -6804,7 +6804,7 @@ - Counters + Counter @@ -6825,7 +6825,7 @@ - Distribution Summaries + Distribution Summary @@ -6867,7 +6867,7 @@ - Max Gauges + Max Gauge @@ -6884,11 +6884,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,7 +7374,7 @@ -

    Timers

    +

    Timer

    TBD

    diff --git a/spectator/lang/nodejs/usage/index.html b/spectator/lang/nodejs/usage/index.html index 1169397f..c2efd2f0 100644 --- a/spectator/lang/nodejs/usage/index.html +++ b/spectator/lang/nodejs/usage/index.html @@ -6665,7 +6665,7 @@ - Counters + Counter @@ -6686,7 +6686,7 @@ - Distribution Summaries + Distribution Summary @@ -6707,7 +6707,7 @@ - Gauges + Gauge @@ -6728,7 +6728,7 @@ - Percentile Timers + Percentile Timer @@ -6749,7 +6749,7 @@ - Timers + Timer @@ -6883,7 +6883,7 @@ - Age Gauges + Age Gauge @@ -6904,7 +6904,7 @@ - Counters + Counter @@ -6925,7 +6925,7 @@ - Distribution Summaries + Distribution Summary @@ -6967,7 +6967,7 @@ - Max Gauges + Max Gauge @@ -6984,11 +6984,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -7005,11 +7005,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -7026,11 +7026,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -7051,7 +7072,7 @@ - Timers + Timer diff --git a/spectator/lang/overview/index.html b/spectator/lang/overview/index.html index ef221d3c..4643e404 100644 --- a/spectator/lang/overview/index.html +++ b/spectator/lang/overview/index.html @@ -6612,7 +6612,7 @@ - Counters + Counter @@ -6633,7 +6633,7 @@ - Distribution Summaries + Distribution Summary @@ -6654,7 +6654,7 @@ - Gauges + Gauge @@ -6675,7 +6675,7 @@ - Percentile Timers + Percentile Timer @@ -6696,7 +6696,7 @@ - Timers + Timer @@ -6830,7 +6830,7 @@ - Age Gauges + Age Gauge @@ -6851,7 +6851,7 @@ - Counters + Counter @@ -6872,7 +6872,7 @@ - Distribution Summaries + Distribution Summary @@ -6914,7 +6914,7 @@ - Max Gauges + Max Gauge @@ -6931,11 +6931,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6952,11 +6952,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6973,11 +6973,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6998,7 +7019,7 @@ - Timers + Timer diff --git a/spectator/lang/py/meters/age-gauge/index.html b/spectator/lang/py/meters/age-gauge/index.html index 6c25a0be..4d0e9f7f 100644 --- a/spectator/lang/py/meters/age-gauge/index.html +++ b/spectator/lang/py/meters/age-gauge/index.html @@ -22,7 +22,7 @@ - Age Gauges - Atlas Docs + Age Gauge - Atlas Docs @@ -102,7 +102,7 @@
    - Age Gauges + Age Gauge
    @@ -6553,7 +6553,7 @@ - Counters + Counter @@ -6574,7 +6574,7 @@ - Distribution Summaries + Distribution Summary @@ -6595,7 +6595,7 @@ - Gauges + Gauge @@ -6616,7 +6616,7 @@ - Percentile Timers + Percentile Timer @@ -6637,7 +6637,7 @@ - Timers + Timer @@ -6782,7 +6782,7 @@ - Age Gauges + Age Gauge @@ -6804,7 +6804,7 @@ - Counters + Counter @@ -6825,7 +6825,7 @@ - Distribution Summaries + Distribution Summary @@ -6867,7 +6867,7 @@ - Max Gauges + Max Gauge @@ -6884,11 +6884,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,7 +7374,7 @@ -

    Age Gauges

    +

    Age Gauge

    The value is the time in seconds since the epoch at which an event has successfully occurred, or 0 to use the current time in epoch seconds. After an Age Gauge has been set, it will continue @@ -7361,14 +7382,22 @@

    Age Gauges

    process runs. The purpose of this metric type is to enable users to more easily implement the Time Since Last Success alerting pattern.

    To set a specific time as the last success:

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
     
    -GlobalRegistry.age_gauge("time.sinceLastSuccess").set(1611081000)
    +registry = Registry()
    +registry.age_gauge("time.sinceLastSuccess").set(1611081000)
    +
    +last_success = registry.new_id("time.sinceLastSuccess")
    +registry.age_gauge_with_id(last_success).set(1611081000)
     

    To set now() as the last success:

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
    +
    +registry = Registry()
    +registry.age_gauge("time.sinceLastSuccess").now()
     
    -GlobalRegistry.age_gauge("time.sinceLastSuccess").set(0)
    +last_success = registry.new_id("time.sinceLastSuccess")
    +registry.age_gauge_with_id(last_success).now()
     

    By default, a maximum of 1000 Age Gauges are allowed per spectatord process, because there is no mechanism for cleaning them up. This value may be tuned with the --age_gauge_limit flag on the @@ -7376,7 +7405,7 @@

    Age Gauges

    Since Age Gauges are long-lived entities that reside in the memory of the SpectatorD process, if you need to delete and re-create them for any reason, then you can use the SpectatorD admin server to accomplish this task. You can delete all Age Gauges or a single Age Gauge.

    -

    Example:

    +

    Example:

    curl -X DELETE \
     http://localhost:1234/metrics/A
     
    diff --git a/spectator/lang/py/meters/counter/index.html b/spectator/lang/py/meters/counter/index.html index 9213886d..44f7a6e1 100644 --- a/spectator/lang/py/meters/counter/index.html +++ b/spectator/lang/py/meters/counter/index.html @@ -22,7 +22,7 @@ - Counters - Atlas Docs + Counter - Atlas Docs @@ -102,7 +102,7 @@
    - Counters + Counter
    @@ -6553,7 +6553,7 @@ - Counters + Counter @@ -6574,7 +6574,7 @@ - Distribution Summaries + Distribution Summary @@ -6595,7 +6595,7 @@ - Gauges + Gauge @@ -6616,7 +6616,7 @@ - Percentile Timers + Percentile Timer @@ -6637,7 +6637,7 @@ - Timers + Timer @@ -6775,7 +6775,7 @@ - Age Gauges + Age Gauge @@ -6803,7 +6803,7 @@ - Counters + Counter @@ -6825,7 +6825,7 @@ - Distribution Summaries + Distribution Summary @@ -6867,7 +6867,7 @@ - Max Gauges + Max Gauge @@ -6884,11 +6884,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,22 +7374,30 @@ -

    Counters

    +

    Counter

    -

    A Counter is used to measure the rate at which an event is occurring. Considering an API -endpoint, a Counter could be used to measure the rate at which it is being accessed.

    -

    Counters are reported to the backend as a rate-per-second. In Atlas, the :per-step operator -can be used to convert them back into a value-per-step on a graph.

    +

    A Counter is used to measure the rate at which an event is occurring. Considering an API endpoint, +a Counter could be used to measure the rate at which it is being accessed.

    +

    Counters are reported to the backend as a rate-per-second. In Atlas, the :per-step operator can +be used to convert them back into a value-per-step on a graph.

    Call increment() when an event occurs:

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
     
    -GlobalRegistry.counter("server.numRequests").increment()
    +registry = Registry()
    +registry.counter("server.numRequests").increment()
    +
    +num_requests = registry.new_id("server.numRequests")
    +registry.counter_with_id(num_requests).increment()
     

    You can also pass a value to increment(). This is useful when a collection of events happens together:

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
    +
    +registry = Registry()
    +registry.counter("queue.itemsAdded").increment(10)
     
    -GlobalRegistry.counter("queue.itemsAdded").increment(10)
    +num_requests = registry.new_id("server.numRequests")
    +registry.counter_with_id(num_requests).increment(10)
     
    diff --git a/spectator/lang/py/meters/dist-summary/index.html b/spectator/lang/py/meters/dist-summary/index.html index 9d4a2382..6b43b17c 100644 --- a/spectator/lang/py/meters/dist-summary/index.html +++ b/spectator/lang/py/meters/dist-summary/index.html @@ -22,7 +22,7 @@ - Distribution Summaries - Atlas Docs + Distribution Summary - Atlas Docs @@ -102,7 +102,7 @@
    - Distribution Summaries + Distribution Summary
    @@ -6553,7 +6553,7 @@ - Counters + Counter @@ -6574,7 +6574,7 @@ - Distribution Summaries + Distribution Summary @@ -6595,7 +6595,7 @@ - Gauges + Gauge @@ -6616,7 +6616,7 @@ - Percentile Timers + Percentile Timer @@ -6637,7 +6637,7 @@ - Timers + Timer @@ -6775,7 +6775,7 @@ - Age Gauges + Age Gauge @@ -6796,7 +6796,7 @@ - Counters + Counter @@ -6824,7 +6824,7 @@ - Distribution Summaries + Distribution Summary @@ -6867,7 +6867,7 @@ - Max Gauges + Max Gauge @@ -6884,11 +6884,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,7 +7374,7 @@ -

    Distribution Summaries

    +

    Distribution Summary

    A Distribution Summary is used to track the distribution of events. It is similar to a Timer, but more general, in that the size does not have to be a period of time. For example, a Distribution @@ -7362,9 +7383,13 @@

    Distribution Summaries

    are readable. If you are measuring payload size, then use bytes, not kilobytes (or some other unit). This means that a 4K tick label will represent 4 kilobytes, rather than 4 kilo-kilobytes.

    Call record() with a value:

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
    +
    +registry = Registry()
    +registry.distribution_summary("server.requestSize").record(10)
     
    -GlobalRegistry.distribution_summary("server.requestSize").record(10)
    +request_size = registry.new_id("server.requestSize")
    +registry.distribution_summary_with_id(request_size).record(10)
     
    diff --git a/spectator/lang/py/meters/gauge/index.html b/spectator/lang/py/meters/gauge/index.html index 984cf9a4..9a93fc8c 100644 --- a/spectator/lang/py/meters/gauge/index.html +++ b/spectator/lang/py/meters/gauge/index.html @@ -6553,7 +6553,7 @@ - Counters + Counter @@ -6574,7 +6574,7 @@ - Distribution Summaries + Distribution Summary @@ -6595,7 +6595,7 @@ - Gauges + Gauge @@ -6616,7 +6616,7 @@ - Percentile Timers + Percentile Timer @@ -6637,7 +6637,7 @@ - Timers + Timer @@ -6775,7 +6775,7 @@ - Age Gauges + Age Gauge @@ -6796,7 +6796,7 @@ - Counters + Counter @@ -6817,7 +6817,7 @@ - Distribution Summaries + Distribution Summary @@ -6867,7 +6867,7 @@ - Max Gauges + Max Gauge @@ -6884,11 +6884,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7362,16 +7383,24 @@

    Gauges

    then a gauge for the size will show the size when it was sampled. The size may have been much higher or lower at some point during interval, but that is not known.

    Call set() with a value:

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
     
    -GlobalRegistry.gauge("server.queueSize").set(10)
    +registry = Registry()
    +registry.gauge("server.queueSize").set(10)
    +
    +queue_size = registry.new_id("server.queueSize")
    +registry.gauge_with_id(queue_size).set(10)
     

    Gauges will report the last set value for 15 minutes. This done so that updates to the values do not need to be collected on a tight 1-minute schedule to ensure that Atlas shows unbroken lines in graphs. A custom TTL may be configured for gauges. SpectatorD enforces a minimum TTL of 5 seconds.

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
    +
    +registry = Registry()
    +registry.gauge("server.queueSize", ttl_seconds=120).set(10)
     
    -GlobalRegistry.gauge("server.queueSize", ttl_seconds=120).set(10)
    +queue_size = registry.new_id("server.queueSize")
    +registry.gauge_with_id(queue_size, ttl_seconds=120).set(10)
     
    diff --git a/spectator/lang/py/meters/max-gauge/index.html b/spectator/lang/py/meters/max-gauge/index.html index 6f5059e2..98472688 100644 --- a/spectator/lang/py/meters/max-gauge/index.html +++ b/spectator/lang/py/meters/max-gauge/index.html @@ -14,7 +14,7 @@ - + @@ -22,7 +22,7 @@ - Max Gauges - Atlas Docs + Max Gauge - Atlas Docs @@ -102,7 +102,7 @@
    - Max Gauges + Max Gauge
    @@ -6553,7 +6553,7 @@ - Counters + Counter @@ -6574,7 +6574,7 @@ - Distribution Summaries + Distribution Summary @@ -6595,7 +6595,7 @@ - Gauges + Gauge @@ -6616,7 +6616,7 @@ - Percentile Timers + Percentile Timer @@ -6637,7 +6637,7 @@ - Timers + Timer @@ -6775,7 +6775,7 @@ - Age Gauges + Age Gauge @@ -6796,7 +6796,7 @@ - Counters + Counter @@ -6817,7 +6817,7 @@ - Distribution Summaries + Distribution Summary @@ -6866,7 +6866,7 @@ - Max Gauges + Max Gauge @@ -6884,11 +6884,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,16 +7374,20 @@ -

    Max Gauges

    +

    Max Gauge

    The value is a number that is sampled at a point in time, but it is reported as a maximum Gauge value to the backend. This ensures that only the maximum value observed during a reporting interval is sent to the backend, thus over-riding the last-write-wins semantics of standard Gauges. Unlike standard Gauges, Max Gauges do not continue to report to the backend, and there is no TTL.

    Call set() with a value:

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
    +
    +registry = Registry()
    +registry.max_gauge("server.queueSize").set(10)
     
    -GlobalRegistry.max_gauge("server.queueSize").set(10)
    +queue_size = registry.new_id("server.queueSize")
    +registry.max_gauge_with_id(queue_size).set(10)
     
    diff --git a/spectator/lang/py/meters/pct-timer/index.html b/spectator/lang/py/meters/monotonic-counter-uint/index.html similarity index 97% rename from spectator/lang/py/meters/pct-timer/index.html rename to spectator/lang/py/meters/monotonic-counter-uint/index.html index b677bcd0..d4dd6231 100644 --- a/spectator/lang/py/meters/pct-timer/index.html +++ b/spectator/lang/py/meters/monotonic-counter-uint/index.html @@ -8,13 +8,13 @@ - + - + - + @@ -22,7 +22,7 @@ - Percentile Timers - Atlas Docs + Monotonic Counter Uint - Atlas Docs @@ -102,7 +102,7 @@
    - Percentile Timers + Monotonic Counter Uint
    @@ -6553,7 +6553,7 @@ - Counters + Counter @@ -6574,7 +6574,7 @@ - Distribution Summaries + Distribution Summary @@ -6595,7 +6595,7 @@ - Gauges + Gauge @@ -6616,7 +6616,7 @@ - Percentile Timers + Percentile Timer @@ -6637,7 +6637,7 @@ - Timers + Timer @@ -6775,7 +6775,7 @@ - Age Gauges + Age Gauge @@ -6796,7 +6796,7 @@ - Counters + Counter @@ -6817,7 +6817,7 @@ - Distribution Summaries + Distribution Summary @@ -6859,7 +6859,7 @@ - Max Gauges + Max Gauge @@ -6876,11 +6876,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6894,18 +6894,26 @@ + -
  • - + +
  • + + + + + + - Percentile Distribution Summaries + Monotonic Counter Uint +
  • @@ -6915,26 +6923,39 @@ - +
  • + + -
  • - - - - - - + + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + - Percentile Timers + Percentile Timer -
  • @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,29 +7374,22 @@ -

    Percentile Timers

    - -

    The value is the number of seconds that have elapsed for an event, with percentile estimates.

    -

    This metric type will track the data distribution by maintaining a set of Counters. The -distribution can then be used on the server side to estimate percentiles, while still -allowing for arbitrary slicing and dicing based on dimensions.

    -

    In order to maintain the data distribution, they have a higher storage cost, with a worst-case of -up to 300X that of a standard Timer. Be diligent about any additional dimensions added to Percentile -Timers and ensure that they have a small bounded cardinality.

    -

    Call record() with a value:

    -
    from spectator import GlobalRegistry
    +  

    Monotonic Counter Uint

    -GlobalRegistry.pct_timer("server.requestLatency").record(0.01) -
    -

    A stopwatch() method is available which may be used as a Context Manager -to automatically record the number of seconds that have elapsed while executing a block of code:

    -
    import time
    -from spectator import GlobalRegistry
    +

    A Monotonic Counter (uint64) is used to measure the rate at which an event is occurring, when the +source data is a monotonically increasing number. A minimum of two samples must be sent, in order to +calculate a delta value and report it to the backend as a rate-per-second. A variety of networking +metrics may be reported monotonically, and this metric type provides a convenient means of recording +these values, at the expense of a slower time-to-first metric.

    +

    Call set() when an event occurs:

    +
    from ctypes import c_uint64
    +from spectator.registry import Registry
     
    -t = GlobalRegistry.pct_timer("thread.sleep")
    +registry = Registry()
    +registry.monotonic_counter_uint("iface.bytes").set(c_uint64(1))
     
    -with t.stopwatch():
    -    time.sleep(5)
    +iface_bytes = registry.new_id("iface.bytes")
    +registry.monotonic_counter_uint_with_id(iface_bytes).set(c_uint64(1))
     
    diff --git a/spectator/lang/py/meters/mono-counter/index.html b/spectator/lang/py/meters/monotonic-counter/index.html similarity index 98% rename from spectator/lang/py/meters/mono-counter/index.html rename to spectator/lang/py/meters/monotonic-counter/index.html index 84a7de5f..38a6bd35 100644 --- a/spectator/lang/py/meters/mono-counter/index.html +++ b/spectator/lang/py/meters/monotonic-counter/index.html @@ -8,13 +8,13 @@ - + - + @@ -22,7 +22,7 @@ - Monotonic Counters - Atlas Docs + Monotonic Counter - Atlas Docs @@ -102,7 +102,7 @@
    - Monotonic Counters + Monotonic Counter
    @@ -6553,7 +6553,7 @@ - Counters + Counter @@ -6574,7 +6574,7 @@ - Distribution Summaries + Distribution Summary @@ -6595,7 +6595,7 @@ - Gauges + Gauge @@ -6616,7 +6616,7 @@ - Percentile Timers + Percentile Timer @@ -6637,7 +6637,7 @@ - Timers + Timer @@ -6775,7 +6775,7 @@ - Age Gauges + Age Gauge @@ -6796,7 +6796,7 @@ - Counters + Counter @@ -6817,7 +6817,7 @@ - Distribution Summaries + Distribution Summary @@ -6859,7 +6859,7 @@ - Max Gauges + Max Gauge @@ -6887,7 +6887,7 @@ - Monotonic Counters + Monotonic Counter @@ -6905,11 +6905,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6926,11 +6926,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,17 +7374,21 @@ -

    Monotonic Counters

    +

    Monotonic Counter

    -

    A Monotonic Counter is used to measure the rate at which an event is occurring, when the source -data is a monotonically increasing number. A minimum of two samples must be sent, in order to +

    A Monotonic Counter (float) is used to measure the rate at which an event is occurring, when the +source data is a monotonically increasing number. A minimum of two samples must be sent, in order to calculate a delta value and report it to the backend as a rate-per-second. A variety of networking metrics may be reported monotonically, and this metric type provides a convenient means of recording these values, at the expense of a slower time-to-first metric.

    Call set() when an event occurs:

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
    +
    +registry = Registry()
    +registry.monotonic_counter("iface.bytes").set(10)
     
    -GlobalRegistry.monotonic_counter("iface.bytes").set(10)
    +iface_bytes = registry.new_id("iface.bytes")
    +registry.monotonic_counter_with_id(iface_bytes).set(10)
     
    diff --git a/spectator/lang/py/meters/pct-dist-summary/index.html b/spectator/lang/py/meters/percentile-dist-summary/index.html similarity index 98% rename from spectator/lang/py/meters/pct-dist-summary/index.html rename to spectator/lang/py/meters/percentile-dist-summary/index.html index 0d8616cc..45cbbf40 100644 --- a/spectator/lang/py/meters/pct-dist-summary/index.html +++ b/spectator/lang/py/meters/percentile-dist-summary/index.html @@ -8,13 +8,13 @@ - + - + - + @@ -22,7 +22,7 @@ - Percentile Distribution Summaries - Atlas Docs + Percentile Distribution Summary - Atlas Docs @@ -102,7 +102,7 @@
    - Percentile Distribution Summaries + Percentile Distribution Summary
    @@ -6553,7 +6553,7 @@ - Counters + Counter @@ -6574,7 +6574,7 @@ - Distribution Summaries + Distribution Summary @@ -6595,7 +6595,7 @@ - Gauges + Gauge @@ -6616,7 +6616,7 @@ - Percentile Timers + Percentile Timer @@ -6637,7 +6637,7 @@ - Timers + Timer @@ -6775,7 +6775,7 @@ - Age Gauges + Age Gauge @@ -6796,7 +6796,7 @@ - Counters + Counter @@ -6817,7 +6817,7 @@ - Distribution Summaries + Distribution Summary @@ -6859,7 +6859,7 @@ - Max Gauges + Max Gauge @@ -6876,11 +6876,32 @@
  • - + - Monotonic Counters + Monotonic Counter + + + + +
  • + + + + + + + + + + +
  • + + + + + Monotonic Counter Uint @@ -6908,7 +6929,7 @@ - Percentile Distribution Summaries + Percentile Distribution Summary @@ -6926,11 +6947,11 @@
  • - + - Percentile Timers + Percentile Timer @@ -6951,7 +6972,7 @@ - Timers + Timer @@ -7353,19 +7374,23 @@ -

    Percentile Distribution Summaries

    +

    Percentile Distribution Summary

    The value tracks the distribution of events, with percentile estimates. It is similar to a -Percentile Timer, but more general, because the size does not have to be a period of time.

    +PercentileTimer, but more general, because the size does not have to be a period of time.

    For example, it can be used to measure the payload sizes of requests hitting a server or the number of records returned from a query.

    In order to maintain the data distribution, they have a higher storage cost, with a worst-case of up to 300X that of a standard Distribution Summary. Be diligent about any additional dimensions added to Percentile Distribution Summaries and ensure that they have a small bounded cardinality.

    Call record() with a value:

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
    +
    +registry = Registry()
    +registry.pct_distribution_summary("server.requestSize").record(10)
     
    -GlobalRegistry.pct_distribution_summary("server.requestSize").record(10)
    +request_size = registry.new_id("server.requestSize")
    +registry.pct_distribution_summary_with_id(request_size).record(10)
     
    diff --git a/spectator/lang/py/meters/percentile-timer/index.html b/spectator/lang/py/meters/percentile-timer/index.html new file mode 100644 index 00000000..f68dfae3 --- /dev/null +++ b/spectator/lang/py/meters/percentile-timer/index.html @@ -0,0 +1,7460 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Percentile Timer - Atlas Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + +
    +
    + +
    + + + + +
    + + +
    + +
    + + + + + + + + + +
    +
    + + + +
    +
    +
    + + + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + + + + +

    Percentile Timer

    + +

    The value is the number of seconds that have elapsed for an event, with percentile estimates.

    +

    This metric type will track the data distribution by maintaining a set of Counters. The +distribution can then be used on the server side to estimate percentiles, while still +allowing for arbitrary slicing and dicing based on dimensions.

    +

    In order to maintain the data distribution, they have a higher storage cost, with a worst-case of +up to 300X that of a standard Timer. Be diligent about any additional dimensions added to Percentile +Timers and ensure that they have a small bounded cardinality.

    +

    Call record() with a value:

    +
    from spectator.registry import Registry
    +
    +registry = Registry()
    +registry.pct_timer("server.requestLatency").record(0.01)
    +
    +request_latency = registry.new_id("server.requestLatency")
    +registry.pct_timer_with_id(request_latency).record(0.01)
    +
    +

    A StopWatch class is available, which may be used as a Context Manager to automatically record +the number of seconds that have elapsed while executing a block of code:

    +
    import time
    +from spectator.registry import Registry
    +from spectator.stopwatch import StopWatch
    +
    +registry = Registry()
    +thread_sleep = registry.pct_timer("thread.sleep")
    +
    +with StopWatch(thread_sleep):
    +    time.sleep(5)
    +
    + + + + + + + + + + + + + +
    +
    + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/spectator/lang/py/meters/timer/index.html b/spectator/lang/py/meters/timer/index.html index 066b5b5a..b4c4c9b0 100644 --- a/spectator/lang/py/meters/timer/index.html +++ b/spectator/lang/py/meters/timer/index.html @@ -11,7 +11,7 @@ - + @@ -22,7 +22,7 @@ - Timers - Atlas Docs + Timer - Atlas Docs @@ -102,7 +102,7 @@
    - Timers + Timer
    @@ -6553,7 +6553,7 @@ - Counters + Counter @@ -6574,7 +6574,7 @@ - Distribution Summaries + Distribution Summary @@ -6595,7 +6595,7 @@ - Gauges + Gauge @@ -6616,7 +6616,7 @@ - Percentile Timers + Percentile Timer @@ -6637,7 +6637,7 @@ - Timers + Timer @@ -6775,7 +6775,7 @@ - Age Gauges + Age Gauge @@ -6796,7 +6796,7 @@ - Counters + Counter @@ -6817,7 +6817,7 @@ - Distribution Summaries + Distribution Summary @@ -6859,7 +6859,7 @@ - Max Gauges + Max Gauge @@ -6876,11 +6876,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6897,11 +6897,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6918,11 +6918,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6950,7 +6971,7 @@ - Timers + Timer @@ -7353,31 +7374,30 @@ -

    Timers

    +

    Timer

    A Timer is used to measure how long (in seconds) some event is taking.

    Call record() with a value:

    -
    from spectator import GlobalRegistry
    +
    from spectator.registry import Registry
    +
    +registry = Registry()
    +registry.timer("server.requestLatency").record(0.01)
     
    -GlobalRegistry.timer("server.requestLatency").record(0.01)
    +request_latency = registry.new_id("server.requestLatency")
    +registry.timer_with_id(request_latency).record(0.01)
     
    -

    A stopwatch() method is available which may be used as a Context Manager to automatically record +

    A StopWatch class is available, which may be used as a Context Manager to automatically record the number of seconds that have elapsed while executing a block of code:

    import time
    -from spectator import GlobalRegistry
    +from spectator.registry import Registry
    +from spectator.stopwatch import StopWatch
     
    -t = GlobalRegistry.timer("thread.sleep")
    +registry = Registry()
    +thread_sleep = registry.timer("thread.sleep")
     
    -with t.stopwatch():
    +with StopWatch(thread_sleep):
         time.sleep(5)
     
    -

    Internally, Timers will keep track of the following statistics as they are used:

    -
      -
    • count
    • -
    • totalTime
    • -
    • totalOfSquares
    • -
    • max
    • -
    diff --git a/spectator/lang/py/migrations/index.html b/spectator/lang/py/migrations/index.html index 1f60f98e..3ed291e1 100644 --- a/spectator/lang/py/migrations/index.html +++ b/spectator/lang/py/migrations/index.html @@ -74,7 +74,7 @@
    - + Skip to content @@ -6558,7 +6558,7 @@ - Counters + Counter @@ -6579,7 +6579,7 @@ - Distribution Summaries + Distribution Summary @@ -6600,7 +6600,7 @@ - Gauges + Gauge @@ -6621,7 +6621,7 @@ - Percentile Timers + Percentile Timer @@ -6642,7 +6642,7 @@ - Timers + Timer @@ -6778,7 +6778,7 @@ - Age Gauges + Age Gauge @@ -6799,7 +6799,7 @@ - Counters + Counter @@ -6820,7 +6820,7 @@ - Distribution Summaries + Distribution Summary @@ -6862,7 +6862,7 @@ - Max Gauges + Max Gauge @@ -6879,11 +6879,11 @@
  • - + - Monotonic Counters + Monotonic Counter @@ -6900,11 +6900,11 @@
  • - + - Percentile Distribution Summaries + Monotonic Counter Uint @@ -6921,11 +6921,32 @@
  • - + - Percentile Timers + Percentile Distribution Summary + + + + +
  • + + + + + + + + + + +
  • + + + + + Percentile Timer @@ -6946,7 +6967,7 @@ - Timers + Timer @@ -7013,9 +7034,126 @@