diff --git a/src/github.com/matrix-org/dendrite/clientapi/routing/register.go b/src/github.com/matrix-org/dendrite/clientapi/routing/register.go index b4e0d3ad..7a3b8686 100644 --- a/src/github.com/matrix-org/dendrite/clientapi/routing/register.go +++ b/src/github.com/matrix-org/dendrite/clientapi/routing/register.go @@ -40,9 +40,20 @@ import ( "github.com/matrix-org/dendrite/clientapi/jsonerror" "github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/util" + "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" ) +var ( + // Prometheus metrics + amtRegUsers = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "dendrite_clientapi_reg_users_total", + Help: "Total number of registered users", + }, + ) +) + const ( minPasswordLength = 8 // http://matrix.org/docs/spec/client_server/r0.2.0.html#password-based maxPasswordLength = 512 // https://github.com/matrix-org/synapse/blob/v0.20.0/synapse/rest/client/v2_alpha/register.py#L161 @@ -50,6 +61,11 @@ const ( sessionIDLength = 24 ) +func init() { + // Register prometheus metrics. They must be registered to be exposed. + prometheus.MustRegister(amtRegUsers) +} + // sessionsDict keeps track of completed auth stages for each session. type sessionsDict struct { sessions map[string][]authtypes.LoginType @@ -665,6 +681,9 @@ func completeRegistration( } } + // Increment prometheus counter for created users + amtRegUsers.Inc() + return util.JSONResponse{ Code: http.StatusOK, JSON: registerResponse{ diff --git a/src/github.com/matrix-org/dendrite/cmd/dendrite-monolith-server/main.go b/src/github.com/matrix-org/dendrite/cmd/dendrite-monolith-server/main.go index c00154a7..84c2b394 100644 --- a/src/github.com/matrix-org/dendrite/cmd/dendrite-monolith-server/main.go +++ b/src/github.com/matrix-org/dendrite/cmd/dendrite-monolith-server/main.go @@ -30,6 +30,8 @@ import ( "github.com/matrix-org/dendrite/publicroomsapi" "github.com/matrix-org/dendrite/roomserver" "github.com/matrix-org/dendrite/syncapi" + + "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" ) @@ -66,16 +68,21 @@ func main() { httpHandler := common.WrapHandlerInCORS(base.APIMux) + // Set up the API endpoints we handle. /metrics is for prometheus, and is + // not wrapped by CORS, while everything else is + http.Handle("/metrics", promhttp.Handler()) + http.Handle("/", httpHandler) + // Expose the matrix APIs directly rather than putting them under a /api path. go func() { logrus.Info("Listening on ", *httpBindAddr) - logrus.Fatal(http.ListenAndServe(*httpBindAddr, httpHandler)) + logrus.Fatal(http.ListenAndServe(*httpBindAddr, nil)) }() // Handle HTTPS if certificate and key are provided go func() { if *certFile != "" && *keyFile != "" { logrus.Info("Listening on ", *httpsBindAddr) - logrus.Fatal(http.ListenAndServeTLS(*httpsBindAddr, *certFile, *keyFile, httpHandler)) + logrus.Fatal(http.ListenAndServeTLS(*httpsBindAddr, *certFile, *keyFile, nil)) } }() diff --git a/src/github.com/matrix-org/dendrite/common/basecomponent/base.go b/src/github.com/matrix-org/dendrite/common/basecomponent/base.go index 1e589c1a..ed7cb7ab 100644 --- a/src/github.com/matrix-org/dendrite/common/basecomponent/base.go +++ b/src/github.com/matrix-org/dendrite/common/basecomponent/base.go @@ -134,7 +134,6 @@ func (b *BaseDendrite) CreateFederationClient() *gomatrixserverlib.FederationCli // ApiMux under /api/ and adds a prometheus handler under /metrics. 
func (b *BaseDendrite) SetupAndServeHTTP(addr string) { common.SetupHTTPAPI(http.DefaultServeMux, common.WrapHandlerInCORS(b.APIMux)) - logrus.Infof("Starting %s server on %s", b.componentName, addr) err := http.ListenAndServe(addr, nil) diff --git a/src/github.com/matrix-org/dendrite/common/httpapi.go b/src/github.com/matrix-org/dendrite/common/httpapi.go index 4f6b53ee..1fa57432 100644 --- a/src/github.com/matrix-org/dendrite/common/httpapi.go +++ b/src/github.com/matrix-org/dendrite/common/httpapi.go @@ -10,7 +10,7 @@ import ( "github.com/matrix-org/util" opentracing "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" - "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" ) // MakeAuthAPI turns a util.JSONRequestHandler function into an http.Handler which checks the access token in the request. @@ -36,7 +36,7 @@ func MakeExternalAPI(metricsName string, f func(*http.Request) util.JSONResponse h.ServeHTTP(w, req) } - return prometheus.InstrumentHandler(metricsName, http.HandlerFunc(withSpan)) + return http.HandlerFunc(withSpan) } // MakeInternalAPI turns a util.JSONRequestHandler function into an http.Handler. @@ -62,7 +62,7 @@ func MakeInternalAPI(metricsName string, f func(*http.Request) util.JSONResponse h.ServeHTTP(w, req) } - return prometheus.InstrumentHandler(metricsName, http.HandlerFunc(withSpan)) + return http.HandlerFunc(withSpan) } // MakeFedAPI makes an http.Handler that checks matrix federation authentication. @@ -87,8 +87,7 @@ func MakeFedAPI( // SetupHTTPAPI registers an HTTP API mux under /api and sets up a metrics // listener. func SetupHTTPAPI(servMux *http.ServeMux, apiMux http.Handler) { - // This is deprecated. - servMux.Handle("/metrics", prometheus.Handler()) // nolint: megacheck, staticcheck + servMux.Handle("/metrics", promhttp.Handler()) servMux.Handle("/api/", http.StripPrefix("/api", apiMux)) } diff --git a/vendor/manifest b/vendor/manifest index c17b5cf8..7a06d5d4 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -123,7 +123,7 @@ { "importpath": "github.com/matrix-org/dugong", "repository": "https://github.com/matrix-org/dugong", - "revision": "a012152b8769963afc822a9af7a3eceac4d776e8", + "revision": "ea0a4690a0d5b414a18dbb06cf6153309a2d0528", "branch": "master" }, { @@ -209,9 +209,15 @@ "path": "/difflib" }, { - "importpath": "github.com/prometheus/client_golang", + "importpath": "github.com/prometheus/client_golang/prometheus", "repository": "https://github.com/prometheus/client_golang", - "revision": "c317fb74746eac4fc65fe3909195f4cf67c5562a", + "revision": "c51dc758d4bb30acacbef9eaa2b774969a135086", + "branch": "master" + }, + { + "importpath": "github.com/prometheus/client_golang/prometheus/promhttp", + "repository": "https://github.com/prometheus/client_golang", + "revision": "c51dc758d4bb30acacbef9eaa2b774969a135086", "branch": "master" }, { diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/CHANGELOG.md b/vendor/src/github.com/prometheus/client_golang/prometheus/CHANGELOG.md new file mode 100644 index 00000000..330788a4 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/CHANGELOG.md @@ -0,0 +1,109 @@ +## 0.8.0 / 2016-08-17 +* [CHANGE] Registry is doing more consistency checks. This might break + existing setups that used to export inconsistent metrics. +* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow + arbitrary grouping. +* [CHANGE] Removed `SelfCollector`. 
+* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods. +* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`, + `extraction`. +* [CHANGE] Deprecated a number of functions. +* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer` + interfaces. +* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package + `promhttp`) and enabling the creation of other exposition mechanisms. +* [FEATURE] `MustRegister` is variadic now, allowing registration of many + collectors in one call. +* [FEATURE] Added HTTP API v1 package. +* [ENHANCEMENT] Numerous documentation improvements. +* [ENHANCEMENT] Improved metric sorting. +* [ENHANCEMENT] Inlined fnv64a hashing for improved performance. +* [ENHANCEMENT] Several test improvements. +* [BUGFIX] Handle collisions in MetricVec. + +## 0.7.0 / 2015-07-27 +* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix. +* [BUGFIX] Closed gaps in metric consistency check. +* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling. +* [ENHANCEMENT] Document the possibility to create "empty" metrics in + a metric vector. +* [ENHANCEMENT] Fix and clarify various doc comments and the README.md. +* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler. +* [ENHANCEMENT] Change responseWriterDelegator.written to int64. + +## 0.6.0 / 2015-06-01 +* [CHANGE] Rename process_goroutines to go_goroutines. +* [ENHANCEMENT] Validate label names during YAML decoding. +* [ENHANCEMENT] Add LabelName regular expression. +* [BUGFIX] Ensure alignment of struct members for 32-bit systems. + +## 0.5.0 / 2015-05-06 +* [BUGFIX] Removed a weakness in the fingerprinting aka signature code. + This makes fingerprinting slower and more allocation-heavy, but the + weakness was too severe to be tolerated. +* [CHANGE] As a result of the above, Metric.Fingerprint is now returning + a different fingerprint. To keep the same fingerprint, the new method + Metric.FastFingerprint was introduced, which will be used by the + Prometheus server for storage purposes (implying that a collision + detection has to be added, too). +* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on + fingerprinting anymore, removing the possibility of an undetected + fingerprint collision. +* [FEATURE] The Go collector in the exposition library includes garbage + collection stats. +* [FEATURE] The exposition library allows to create constant "throw-away" + summaries and histograms. +* [CHANGE] A number of new reserved labels and prefixes. + +## 0.4.0 / 2015-04-08 +* [CHANGE] Return NaN when Summaries have no observations yet. +* [BUGFIX] Properly handle Summary decay upon Write(). +* [BUGFIX] Fix the documentation link to the consumption library. +* [FEATURE] Allow the metric family injection hook to merge with existing + metric families. +* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs. +* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions. + +## 0.3.2 / 2015-03-11 +* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is + only used by the Prometheus server internally. +* [CLEANUP] Added licenses of vendored code left out by godep. + +## 0.3.1 / 2015-03-04 +* [ENHANCEMENT] Switched fingerprinting functions from own free list to + sync.Pool. +* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests). + +## 0.3.0 / 2015-03-03 +* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL + PERSISTED FINGERPRINTS. 
IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS + VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE. +* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was + arguably broken.) +* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If + client_golang is used as a library, the vendoring will stay out of your way. +* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made + the fingerprinting change above necessary.) +* [FEATURE] Added new fingerprinting functions SignatureForLabels and + SignatureWithoutLabels to be used by the Prometheus server. These functions + require fewer allocations than the ones currently used by the server. + +## 0.2.0 / 2015-02-23 +* [FEATURE] Introduce new Histagram metric type. +* [CHANGE] Ignore process collector errors for now (better error handling + pending). +* [CHANGE] Use clear error interface for process pidFn. +* [BUGFIX] Fix Go download links for several archs and OSes. +* [ENHANCEMENT] Massively improve Gauge and Counter performance. +* [ENHANCEMENT] Catch illegal label names for summaries in histograms. +* [ENHANCEMENT] Reduce allocations during fingerprinting. +* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if + both cgo is available and the build is for an OS with procfs. +* [CLEANUP] Clean up code style issues. +* [CLEANUP] Mark slow test as such and exclude them from travis. +* [CLEANUP] Update protobuf library package name. +* [CLEANUP] Updated vendoring of beorn7/perks. + +## 0.1.0 / 2015-02-02 +* [CLEANUP] Introduced semantic versioning and changelog. From now on, + changes will be reported in this file. diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/CONTRIBUTING.md b/vendor/src/github.com/prometheus/client_golang/prometheus/CONTRIBUTING.md new file mode 100644 index 00000000..40503edb --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). 
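To make the intent of the register.go, main.go and httpapi.go hunks above concrete, the following minimal sketch (not part of the patch) shows how a counter registered against the default registry is exposed through the promhttp handler that this change switches to, using the variadic MustRegister and separated HTTP exposition noted in the vendored 0.8.0 CHANGELOG. The metric name mirrors the one added to register.go; the listen address is an arbitrary assumption for illustration.

// metrics_sketch.go — illustrative only, assuming the vendored client_golang API above.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// amtRegUsers mirrors the counter added in register.go: a monotonically
// increasing count of completed user registrations.
var amtRegUsers = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "dendrite_clientapi_reg_users_total",
	Help: "Total number of registered users",
})

func main() {
	// MustRegister is variadic, so several collectors could be registered
	// against the default registry in one call; registration must happen
	// before the metric is scraped (Dendrite does this in an init func).
	prometheus.MustRegister(amtRegUsers)

	// Simulate a completed registration; in the patch this increment lives
	// at the end of completeRegistration.
	amtRegUsers.Inc()

	// promhttp.Handler() serves the default registry and replaces the
	// deprecated prometheus.Handler()/InstrumentHandler calls removed from
	// httpapi.go. The address here is only an example.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8008", nil))
}

As in the monolith server change above, the /metrics route is registered on the default mux and deliberately left outside the CORS wrapper applied to the Matrix APIs.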
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/ISSUE_TEMPLATE.md b/vendor/src/github.com/prometheus/client_golang/prometheus/ISSUE_TEMPLATE.md new file mode 100644 index 00000000..a456acf5 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/ISSUE_TEMPLATE.md @@ -0,0 +1,8 @@ + diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/LICENSE b/vendor/src/github.com/prometheus/client_golang/prometheus/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/MAINTAINERS.md b/vendor/src/github.com/prometheus/client_golang/prometheus/MAINTAINERS.md new file mode 100644 index 00000000..3ede55fe --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/MAINTAINERS.md @@ -0,0 +1 @@ +* Björn Rabenstein diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/NOTICE b/vendor/src/github.com/prometheus/client_golang/prometheus/NOTICE new file mode 100644 index 00000000..dd878a30 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/README.md b/vendor/src/github.com/prometheus/client_golang/prometheus/README.md index 44986bff..17d46eca 100644 --- a/vendor/src/github.com/prometheus/client_golang/prometheus/README.md +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/README.md @@ -1 +1,94 @@ -See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). 
+# Prometheus Go client library + +[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang) +[![go-doc](https://godoc.org/github.com/prometheus/client_golang?status.svg)](https://godoc.org/github.com/prometheus/client_golang) + +This is the [Go](http://golang.org) client library for +[Prometheus](http://prometheus.io). It has two separate parts, one for +instrumenting application code, and one for creating clients that talk to the +Prometheus HTTP API. + +__This library requires Go1.7 or later.__ + +## Important note about releases, versioning, tagging, stability, and your favorite Go dependency management tool + +While our goal is to follow [Semantic Versioning](https://semver.org/), this +repository is still pre-1.0.0. To quote the +[Semantic Versioning spec](https://semver.org/#spec-item-4): “Anything may +change at any time. The public API should not be considered stable.” We know +that this is at odds with the widespread use of this library. However, just +declaring something 1.0.0 doesn't make it 1.0.0. Instead, we are working +towards a 1.0.0 release that actually deserves its major version number. + +Having said that, we aim for always keeping the tip of master in a workable +state and for only introducing ”mildly” breaking changes up to and including +[v0.9.0](https://github.com/prometheus/client_golang/milestone/1). After that, +a number of ”hard” breaking changes are planned, see the +[v0.10.0 milestone](https://github.com/prometheus/client_golang/milestone/2), +which should get the library much closer to 1.0.0 state. + +Dependency management in Go projects is still in flux, and there are many tools +floating around. While [dep](https://golang.github.io/dep/) might develop into +the de-facto standard tool, it is still officially experimental. The roadmap +for this library has been laid out with a lot of sometimes painful experience +in mind. We really cannot adjust it every other month to the needs of the +currently most popular or most promising Go dependency management tool. The +recommended course of action with dependency management tools is the following: + +- Do not expect strict post-1.0.0 semver semantics prior to the 1.0.0 + release. If your dependency management tool expects strict post-1.0.0 semver + semantics, you have to wait. Sorry. +- If you want absolute certainty, please lock to a specific commit. You can + also lock to tags, but please don't ask for more tagging. This would suggest + some release or stability testing procedure that simply is not in place. As + said above, we are aiming for stability of the tip of master, but if we + tagged every single commit, locking to tags would be the same as locking to + commits. +- If you want to get the newer features and improvements and are willing to + take the minor risk of newly introduced bugs and “mild” breakage, just always + update to the tip of master (which is essentially the original idea of Go + dependency management). We recommend to not use features marked as + _deprecated_ in this case. +- Once [v0.9.0](https://github.com/prometheus/client_golang/milestone/1) is + out, you could lock to v0.9.x to get bugfixes (and perhaps minor new + features) while avoiding the “hard” breakage that will come with post-0.9 + features. 
+ +## Instrumenting applications + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) + +The +[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) +contains the instrumentation library. See the +[best practices section](http://prometheus.io/docs/practices/naming/) of the +Prometheus documentation to learn more about instrumenting applications. + +The +[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) +contains simple examples of instrumented code. + +## Client for the Prometheus HTTP API + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus/v1)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus/v1) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api) + +The +[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) +contains the client for the +[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you +to write Go applications that query time series data from a Prometheus +server. It is still in alpha stage. + +## Where is `model`, `extraction`, and `text`? + +The `model` packages has been moved to +[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model). + +The `extraction` and `text` packages are now contained in +[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt). + +## Contributing and community + +See the [contributing guidelines](CONTRIBUTING.md) and the +[Community section](http://prometheus.io/community/) of the homepage. diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/VERSION b/vendor/src/github.com/prometheus/client_golang/prometheus/VERSION new file mode 100644 index 00000000..a3df0a69 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/VERSION @@ -0,0 +1 @@ +0.8.0 diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/api/client.go b/vendor/src/github.com/prometheus/client_golang/prometheus/api/client.go new file mode 100644 index 00000000..bf267246 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/api/client.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +// Package api provides clients for the HTTP APIs. +package api + +import ( + "context" + "io/ioutil" + "net" + "net/http" + "net/url" + "path" + "strings" + "time" +) + +// DefaultRoundTripper is used if no RoundTripper is set in Config. 
+var DefaultRoundTripper http.RoundTripper = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +// Config defines configuration parameters for a new client. +type Config struct { + // The address of the Prometheus to connect to. + Address string + + // RoundTripper is used by the Client to drive HTTP requests. If not + // provided, DefaultRoundTripper will be used. + RoundTripper http.RoundTripper +} + +func (cfg *Config) roundTripper() http.RoundTripper { + if cfg.RoundTripper == nil { + return DefaultRoundTripper + } + return cfg.RoundTripper +} + +// Client is the interface for an API client. +type Client interface { + URL(ep string, args map[string]string) *url.URL + Do(context.Context, *http.Request) (*http.Response, []byte, error) +} + +// NewClient returns a new Client. +// +// It is safe to use the returned Client from multiple goroutines. +func NewClient(cfg Config) (Client, error) { + u, err := url.Parse(cfg.Address) + if err != nil { + return nil, err + } + u.Path = strings.TrimRight(u.Path, "/") + + return &httpClient{ + endpoint: u, + client: http.Client{Transport: cfg.roundTripper()}, + }, nil +} + +type httpClient struct { + endpoint *url.URL + client http.Client +} + +func (c *httpClient) URL(ep string, args map[string]string) *url.URL { + p := path.Join(c.endpoint.Path, ep) + + for arg, val := range args { + arg = ":" + arg + p = strings.Replace(p, arg, val, -1) + } + + u := *c.endpoint + u.Path = p + + return &u +} + +func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + if ctx != nil { + req = req.WithContext(ctx) + } + resp, err := c.client.Do(req) + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + close(done) + }() + + select { + case <-ctx.Done(): + err = resp.Body.Close() + <-done + if err == nil { + err = ctx.Err() + } + case <-done: + } + + return resp, body, err +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/api/client_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/api/client_test.go new file mode 100644 index 00000000..b1bcfc9b --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/api/client_test.go @@ -0,0 +1,115 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +package api + +import ( + "net/http" + "net/url" + "testing" +) + +func TestConfig(t *testing.T) { + c := Config{} + if c.roundTripper() != DefaultRoundTripper { + t.Fatalf("expected default roundtripper for nil RoundTripper field") + } +} + +func TestClientURL(t *testing.T) { + tests := []struct { + address string + endpoint string + args map[string]string + expected string + }{ + { + address: "http://localhost:9090", + endpoint: "/test", + expected: "http://localhost:9090/test", + }, + { + address: "http://localhost", + endpoint: "/test", + expected: "http://localhost/test", + }, + { + address: "http://localhost:9090", + endpoint: "test", + expected: "http://localhost:9090/test", + }, + { + address: "http://localhost:9090/prefix", + endpoint: "/test", + expected: "http://localhost:9090/prefix/test", + }, + { + address: "https://localhost:9090/", + endpoint: "/test/", + expected: "https://localhost:9090/test", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param", + args: map[string]string{ + "param": "content", + }, + expected: "http://localhost:9090/test/content", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param/more/:param", + args: map[string]string{ + "param": "content", + }, + expected: "http://localhost:9090/test/content/more/content", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param/more/:foo", + args: map[string]string{ + "param": "content", + "foo": "bar", + }, + expected: "http://localhost:9090/test/content/more/bar", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param", + args: map[string]string{ + "nonexistent": "content", + }, + expected: "http://localhost:9090/test/:param", + }, + } + + for _, test := range tests { + ep, err := url.Parse(test.address) + if err != nil { + t.Fatal(err) + } + + hclient := &httpClient{ + endpoint: ep, + client: http.Client{Transport: DefaultRoundTripper}, + } + + u := hclient.URL(test.endpoint, test.args) + if u.String() != test.expected { + t.Errorf("unexpected result: got %s, want %s", u, test.expected) + continue + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/api/prometheus/v1/api.go b/vendor/src/github.com/prometheus/client_golang/prometheus/api/prometheus/v1/api.go new file mode 100644 index 00000000..c41c0cf5 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/api/prometheus/v1/api.go @@ -0,0 +1,497 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +// Package v1 provides bindings to the Prometheus HTTP API v1: +// http://prometheus.io/docs/querying/api/ +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/prometheus/client_golang/api" + "github.com/prometheus/common/model" +) + +const ( + statusAPIError = 422 + + apiPrefix = "/api/v1" + + epAlertManagers = apiPrefix + "/alertmanagers" + epQuery = apiPrefix + "/query" + epQueryRange = apiPrefix + "/query_range" + epLabelValues = apiPrefix + "/label/:name/values" + epSeries = apiPrefix + "/series" + epTargets = apiPrefix + "/targets" + epSnapshot = apiPrefix + "/admin/tsdb/snapshot" + epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series" + epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones" + epConfig = apiPrefix + "/status/config" + epFlags = apiPrefix + "/status/flags" +) + +// ErrorType models the different API error types. +type ErrorType string + +// HealthStatus models the health status of a scrape target. +type HealthStatus string + +const ( + // Possible values for ErrorType. + ErrBadData ErrorType = "bad_data" + ErrTimeout = "timeout" + ErrCanceled = "canceled" + ErrExec = "execution" + ErrBadResponse = "bad_response" + + // Possible values for HealthStatus. + HealthGood HealthStatus = "up" + HealthUnknown HealthStatus = "unknown" + HealthBad HealthStatus = "down" +) + +// Error is an error returned by the API. +type Error struct { + Type ErrorType + Msg string +} + +func (e *Error) Error() string { + return fmt.Sprintf("%s: %s", e.Type, e.Msg) +} + +// Range represents a sliced time range. +type Range struct { + // The boundaries of the time range. + Start, End time.Time + // The maximum time between two slices within the boundaries. + Step time.Duration +} + +// API provides bindings for Prometheus's v1 API. +type API interface { + // AlertManagers returns an overview of the current state of the Prometheus alert manager discovery. + AlertManagers(ctx context.Context) (AlertManagersResult, error) + // CleanTombstones removes the deleted data from disk and cleans up the existing tombstones. + CleanTombstones(ctx context.Context) error + // Config returns the current Prometheus configuration. + Config(ctx context.Context) (ConfigResult, error) + // DeleteSeries deletes data for a selection of series in a time range. + DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error + // Flags returns the flag values that Prometheus was launched with. + Flags(ctx context.Context) (FlagsResult, error) + // LabelValues performs a query for the values of the given label. + LabelValues(ctx context.Context, label string) (model.LabelValues, error) + // Query performs a query for the given time. + Query(ctx context.Context, query string, ts time.Time) (model.Value, error) + // QueryRange performs a query for the given range. + QueryRange(ctx context.Context, query string, r Range) (model.Value, error) + // Series finds series by label matchers. + Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error) + // Snapshot creates a snapshot of all current data into snapshots/- + // under the TSDB's data directory and returns the directory as response. + Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) + // Targets returns an overview of the current state of the Prometheus target discovery. 
+ Targets(ctx context.Context) (TargetsResult, error) +} + +// AlertManagersResult contains the result from querying the alertmanagers endpoint. +type AlertManagersResult struct { + Active []AlertManager `json:"activeAlertManagers"` + Dropped []AlertManager `json:"droppedAlertManagers"` +} + +// AlertManager models a configured Alert Manager. +type AlertManager struct { + URL string `json:"url"` +} + +// ConfigResult contains the result from querying the config endpoint. +type ConfigResult struct { + YAML string `json:"yaml"` +} + +// FlagsResult contains the result from querying the flag endpoint. +type FlagsResult map[string]string + +// SnapshotResult contains the result from querying the snapshot endpoint. +type SnapshotResult struct { + Name string `json:"name"` +} + +// TargetsResult contains the result from querying the targets endpoint. +type TargetsResult struct { + Active []ActiveTarget `json:"activeTargets"` + Dropped []DroppedTarget `json:"droppedTargets"` +} + +// ActiveTarget models an active Prometheus scrape target. +type ActiveTarget struct { + DiscoveredLabels model.LabelSet `json:"discoveredLabels"` + Labels model.LabelSet `json:"labels"` + ScrapeURL string `json:"scrapeUrl"` + LastError string `json:"lastError"` + LastScrape time.Time `json:"lastScrape"` + Health HealthStatus `json:"health"` +} + +// DroppedTarget models a dropped Prometheus scrape target. +type DroppedTarget struct { + DiscoveredLabels model.LabelSet `json:"discoveredLabels"` +} + +// queryResult contains result data for a query. +type queryResult struct { + Type model.ValueType `json:"resultType"` + Result interface{} `json:"result"` + + // The decoded value. + v model.Value +} + +func (qr *queryResult) UnmarshalJSON(b []byte) error { + v := struct { + Type model.ValueType `json:"resultType"` + Result json.RawMessage `json:"result"` + }{} + + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + + switch v.Type { + case model.ValScalar: + var sv model.Scalar + err = json.Unmarshal(v.Result, &sv) + qr.v = &sv + + case model.ValVector: + var vv model.Vector + err = json.Unmarshal(v.Result, &vv) + qr.v = vv + + case model.ValMatrix: + var mv model.Matrix + err = json.Unmarshal(v.Result, &mv) + qr.v = mv + + default: + err = fmt.Errorf("unexpected value type %q", v.Type) + } + return err +} + +// NewAPI returns a new API for the client. +// +// It is safe to use the returned API from multiple goroutines. 
+func NewAPI(c api.Client) API { + return &httpAPI{client: apiClient{c}} +} + +type httpAPI struct { + client api.Client +} + +func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) { + u := h.client.URL(epAlertManagers, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return AlertManagersResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return AlertManagersResult{}, err + } + + var res AlertManagersResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) CleanTombstones(ctx context.Context) error { + u := h.client.URL(epCleanTombstones, nil) + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return err + } + + _, _, err = h.client.Do(ctx, req) + return err +} + +func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) { + u := h.client.URL(epConfig, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return ConfigResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return ConfigResult{}, err + } + + var res ConfigResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error { + u := h.client.URL(epDeleteSeries, nil) + q := u.Query() + + for _, m := range matches { + q.Add("match[]", m) + } + + q.Set("start", startTime.Format(time.RFC3339Nano)) + q.Set("end", endTime.Format(time.RFC3339Nano)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return err + } + + _, _, err = h.client.Do(ctx, req) + return err +} + +func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) { + u := h.client.URL(epFlags, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return FlagsResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return FlagsResult{}, err + } + + var res FlagsResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, error) { + u := h.client.URL(epLabelValues, map[string]string{"name": label}) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + var labelValues model.LabelValues + err = json.Unmarshal(body, &labelValues) + return labelValues, err +} + +func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) { + u := h.client.URL(epQuery, nil) + q := u.Query() + + q.Set("query", query) + if !ts.IsZero() { + q.Set("time", ts.Format(time.RFC3339Nano)) + } + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var qres queryResult + err = json.Unmarshal(body, &qres) + + return model.Value(qres.v), err +} + +func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) { + u := h.client.URL(epQueryRange, nil) + q := u.Query() + + var ( + start = r.Start.Format(time.RFC3339Nano) + end = r.End.Format(time.RFC3339Nano) + step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64) + ) + + q.Set("query", query) + q.Set("start", start) + q.Set("end", end) + 
q.Set("step", step) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var qres queryResult + err = json.Unmarshal(body, &qres) + + return model.Value(qres.v), err +} + +func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error) { + u := h.client.URL(epSeries, nil) + q := u.Query() + + for _, m := range matches { + q.Add("match[]", m) + } + + q.Set("start", startTime.Format(time.RFC3339Nano)) + q.Set("end", endTime.Format(time.RFC3339Nano)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var mset []model.LabelSet + err = json.Unmarshal(body, &mset) + return mset, err +} + +func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) { + u := h.client.URL(epSnapshot, nil) + q := u.Query() + + q.Set("skip_head", strconv.FormatBool(skipHead)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return SnapshotResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return SnapshotResult{}, err + } + + var res SnapshotResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) { + u := h.client.URL(epTargets, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return TargetsResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return TargetsResult{}, err + } + + var res TargetsResult + err = json.Unmarshal(body, &res) + return res, err +} + +// apiClient wraps a regular client and processes successful API responses. +// Successful also includes responses that errored at the API level. 
+type apiClient struct { + api.Client +} + +type apiResponse struct { + Status string `json:"status"` + Data json.RawMessage `json:"data"` + ErrorType ErrorType `json:"errorType"` + Error string `json:"error"` +} + +func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + resp, body, err := c.Client.Do(ctx, req) + if err != nil { + return resp, body, err + } + + code := resp.StatusCode + + if code/100 != 2 && code != statusAPIError { + return resp, body, &Error{ + Type: ErrBadResponse, + Msg: fmt.Sprintf("bad response code %d", resp.StatusCode), + } + } + + var result apiResponse + + if err = json.Unmarshal(body, &result); err != nil { + return resp, body, &Error{ + Type: ErrBadResponse, + Msg: err.Error(), + } + } + + if (code == statusAPIError) != (result.Status == "error") { + err = &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + } + } + + if code == statusAPIError && result.Status == "error" { + err = &Error{ + Type: result.ErrorType, + Msg: result.Error, + } + } + + return resp, []byte(result.Data), err +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/api/prometheus/v1/api_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/api/prometheus/v1/api_test.go new file mode 100644 index 00000000..2fe60445 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/api/prometheus/v1/api_test.go @@ -0,0 +1,693 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "reflect" + "strings" + "testing" + "time" + + "github.com/prometheus/common/model" +) + +type apiTest struct { + do func() (interface{}, error) + inErr error + inRes interface{} + + reqPath string + reqParam url.Values + reqMethod string + res interface{} + err error +} + +type apiTestClient struct { + *testing.T + curTest apiTest +} + +func (c *apiTestClient) URL(ep string, args map[string]string) *url.URL { + path := ep + for k, v := range args { + path = strings.Replace(path, ":"+k, v, -1) + } + u := &url.URL{ + Host: "test:9090", + Path: path, + } + return u +} + +func (c *apiTestClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + + test := c.curTest + + if req.URL.Path != test.reqPath { + c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path) + } + if req.Method != test.reqMethod { + c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method) + } + + b, err := json.Marshal(test.inRes) + if err != nil { + c.Fatal(err) + } + + resp := &http.Response{} + if test.inErr != nil { + resp.StatusCode = statusAPIError + } else { + resp.StatusCode = http.StatusOK + } + + return resp, b, test.inErr +} + +func TestAPIs(t *testing.T) { + + testTime := time.Now() + + client := &apiTestClient{T: t} + + promAPI := &httpAPI{ + client: client, + } + + doAlertManagers := func() func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.AlertManagers(context.Background()) + } + } + + doCleanTombstones := func() func() (interface{}, error) { + return func() (interface{}, error) { + return nil, promAPI.CleanTombstones(context.Background()) + } + } + + doConfig := func() func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Config(context.Background()) + } + } + + doDeleteSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, error) { + return func() (interface{}, error) { + return nil, promAPI.DeleteSeries(context.Background(), []string{matcher}, startTime, endTime) + } + } + + doFlags := func() func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Flags(context.Background()) + } + } + + doLabelValues := func(label string) func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.LabelValues(context.Background(), label) + } + } + + doQuery := func(q string, ts time.Time) func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Query(context.Background(), q, ts) + } + } + + doQueryRange := func(q string, rng Range) func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.QueryRange(context.Background(), q, rng) + } + } + + doSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Series(context.Background(), []string{matcher}, startTime, endTime) + } + } + + doSnapshot := func(skipHead bool) func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Snapshot(context.Background(), skipHead) + } + } + + doTargets := func() func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Targets(context.Background()) + } + } + + queryTests := []apiTest{ + { + do: doQuery("2", testTime), + inRes: &queryResult{ + Type: model.ValScalar, + Result: &model.Scalar{ + 
Value: 2, + Timestamp: model.TimeFromUnix(testTime.Unix()), + }, + }, + + reqMethod: "GET", + reqPath: "/api/v1/query", + reqParam: url.Values{ + "query": []string{"2"}, + "time": []string{testTime.Format(time.RFC3339Nano)}, + }, + res: &model.Scalar{ + Value: 2, + Timestamp: model.TimeFromUnix(testTime.Unix()), + }, + }, + { + do: doQuery("2", testTime), + inErr: fmt.Errorf("some error"), + + reqMethod: "GET", + reqPath: "/api/v1/query", + reqParam: url.Values{ + "query": []string{"2"}, + "time": []string{testTime.Format(time.RFC3339Nano)}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doQueryRange("2", Range{ + Start: testTime.Add(-time.Minute), + End: testTime, + Step: time.Minute, + }), + inErr: fmt.Errorf("some error"), + + reqMethod: "GET", + reqPath: "/api/v1/query_range", + reqParam: url.Values{ + "query": []string{"2"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + "step": []string{time.Minute.String()}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doLabelValues("mylabel"), + inRes: []string{"val1", "val2"}, + reqMethod: "GET", + reqPath: "/api/v1/label/mylabel/values", + res: model.LabelValues{"val1", "val2"}, + }, + + { + do: doLabelValues("mylabel"), + inErr: fmt.Errorf("some error"), + reqMethod: "GET", + reqPath: "/api/v1/label/mylabel/values", + err: fmt.Errorf("some error"), + }, + + { + do: doSeries("up", testTime.Add(-time.Minute), testTime), + inRes: []map[string]string{ + { + "__name__": "up", + "job": "prometheus", + "instance": "localhost:9090"}, + }, + reqMethod: "GET", + reqPath: "/api/v1/series", + reqParam: url.Values{ + "match": []string{"up"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + }, + res: []model.LabelSet{ + model.LabelSet{ + "__name__": "up", + "job": "prometheus", + "instance": "localhost:9090", + }, + }, + }, + + { + do: doSeries("up", testTime.Add(-time.Minute), testTime), + inErr: fmt.Errorf("some error"), + reqMethod: "GET", + reqPath: "/api/v1/series", + reqParam: url.Values{ + "match": []string{"up"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doSnapshot(true), + inRes: map[string]string{ + "name": "20171210T211224Z-2be650b6d019eb54", + }, + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/snapshot", + reqParam: url.Values{ + "skip_head": []string{"true"}, + }, + res: SnapshotResult{ + Name: "20171210T211224Z-2be650b6d019eb54", + }, + }, + + { + do: doSnapshot(true), + inErr: fmt.Errorf("some error"), + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/snapshot", + err: fmt.Errorf("some error"), + }, + + { + do: doCleanTombstones(), + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/clean_tombstones", + }, + + { + do: doCleanTombstones(), + inErr: fmt.Errorf("some error"), + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/clean_tombstones", + err: fmt.Errorf("some error"), + }, + + { + do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime), + inRes: []map[string]string{ + { + "__name__": "up", + "job": "prometheus", + "instance": "localhost:9090"}, + }, + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/delete_series", + reqParam: url.Values{ + "match": []string{"up"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + }, + }, + + { 
+ do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime), + inErr: fmt.Errorf("some error"), + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/delete_series", + reqParam: url.Values{ + "match": []string{"up"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doConfig(), + reqMethod: "GET", + reqPath: "/api/v1/status/config", + inRes: map[string]string{ + "yaml": "", + }, + res: ConfigResult{ + YAML: "", + }, + }, + + { + do: doConfig(), + reqMethod: "GET", + reqPath: "/api/v1/status/config", + inErr: fmt.Errorf("some error"), + err: fmt.Errorf("some error"), + }, + + { + do: doFlags(), + reqMethod: "GET", + reqPath: "/api/v1/status/flags", + inRes: map[string]string{ + "alertmanager.notification-queue-capacity": "10000", + "alertmanager.timeout": "10s", + "log.level": "info", + "query.lookback-delta": "5m", + "query.max-concurrency": "20", + }, + res: FlagsResult{ + "alertmanager.notification-queue-capacity": "10000", + "alertmanager.timeout": "10s", + "log.level": "info", + "query.lookback-delta": "5m", + "query.max-concurrency": "20", + }, + }, + + { + do: doFlags(), + reqMethod: "GET", + reqPath: "/api/v1/status/flags", + inErr: fmt.Errorf("some error"), + err: fmt.Errorf("some error"), + }, + + { + do: doAlertManagers(), + reqMethod: "GET", + reqPath: "/api/v1/alertmanagers", + inRes: map[string]interface{}{ + "activeAlertManagers": []map[string]string{ + { + "url": "http://127.0.0.1:9091/api/v1/alerts", + }, + }, + "droppedAlertManagers": []map[string]string{ + { + "url": "http://127.0.0.1:9092/api/v1/alerts", + }, + }, + }, + res: AlertManagersResult{ + Active: []AlertManager{ + { + URL: "http://127.0.0.1:9091/api/v1/alerts", + }, + }, + Dropped: []AlertManager{ + { + URL: "http://127.0.0.1:9092/api/v1/alerts", + }, + }, + }, + }, + + { + do: doAlertManagers(), + reqMethod: "GET", + reqPath: "/api/v1/alertmanagers", + inErr: fmt.Errorf("some error"), + err: fmt.Errorf("some error"), + }, + + { + do: doTargets(), + reqMethod: "GET", + reqPath: "/api/v1/targets", + inRes: map[string]interface{}{ + "activeTargets": []map[string]interface{}{ + { + "discoveredLabels": map[string]string{ + "__address__": "127.0.0.1:9090", + "__metrics_path__": "/metrics", + "__scheme__": "http", + "job": "prometheus", + }, + "labels": map[string]string{ + "instance": "127.0.0.1:9090", + "job": "prometheus", + }, + "scrapeUrl": "http://127.0.0.1:9090", + "lastError": "error while scraping target", + "lastScrape": testTime.UTC().Format(time.RFC3339Nano), + "health": "up", + }, + }, + "droppedTargets": []map[string]interface{}{ + { + "discoveredLabels": map[string]string{ + "__address__": "127.0.0.1:9100", + "__metrics_path__": "/metrics", + "__scheme__": "http", + "job": "node", + }, + }, + }, + }, + res: TargetsResult{ + Active: []ActiveTarget{ + { + DiscoveredLabels: model.LabelSet{ + "__address__": "127.0.0.1:9090", + "__metrics_path__": "/metrics", + "__scheme__": "http", + "job": "prometheus", + }, + Labels: model.LabelSet{ + "instance": "127.0.0.1:9090", + "job": "prometheus", + }, + ScrapeURL: "http://127.0.0.1:9090", + LastError: "error while scraping target", + LastScrape: testTime.UTC(), + Health: HealthGood, + }, + }, + Dropped: []DroppedTarget{ + { + DiscoveredLabels: model.LabelSet{ + "__address__": "127.0.0.1:9100", + "__metrics_path__": "/metrics", + "__scheme__": "http", + "job": "node", + }, + }, + }, + }, + }, + + { + do: doTargets(), + reqMethod: 
"GET", + reqPath: "/api/v1/targets", + inErr: fmt.Errorf("some error"), + err: fmt.Errorf("some error"), + }, + } + + var tests []apiTest + tests = append(tests, queryTests...) + + for _, test := range tests { + client.curTest = test + + res, err := test.do() + + if test.err != nil { + if err == nil { + t.Errorf("expected error %q but got none", test.err) + continue + } + if err.Error() != test.err.Error() { + t.Errorf("unexpected error: want %s, got %s", test.err, err) + } + continue + } + if err != nil { + t.Errorf("unexpected error: %s", err) + continue + } + + if !reflect.DeepEqual(res, test.res) { + t.Errorf("unexpected result: want %v, got %v", test.res, res) + } + } +} + +type testClient struct { + *testing.T + + ch chan apiClientTest + req *http.Request +} + +type apiClientTest struct { + code int + response interface{} + expected string + err *Error +} + +func (c *testClient) URL(ep string, args map[string]string) *url.URL { + return nil +} + +func (c *testClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + if ctx == nil { + c.Fatalf("context was not passed down") + } + if req != c.req { + c.Fatalf("request was not passed down") + } + + test := <-c.ch + + var b []byte + var err error + + switch v := test.response.(type) { + case string: + b = []byte(v) + default: + b, err = json.Marshal(v) + if err != nil { + c.Fatal(err) + } + } + + resp := &http.Response{ + StatusCode: test.code, + } + + return resp, b, nil +} + +func TestAPIClientDo(t *testing.T) { + tests := []apiClientTest{ + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`null`), + ErrorType: ErrBadData, + Error: "failed", + }, + err: &Error{ + Type: ErrBadData, + Msg: "failed", + }, + code: statusAPIError, + expected: `null`, + }, + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrTimeout, + Msg: "timed out", + }, + code: statusAPIError, + expected: `test`, + }, + { + response: "bad json", + err: &Error{ + Type: ErrBadResponse, + Msg: "bad response code 400", + }, + code: http.StatusBadRequest, + }, + { + response: "bad json", + err: &Error{ + Type: ErrBadResponse, + Msg: "invalid character 'b' looking for beginning of value", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "success", + Data: json.RawMessage(`"test"`), + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "success", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: http.StatusOK, + }, + } + + tc := &testClient{ + T: t, + ch: make(chan apiClientTest, 1), + req: &http.Request{}, + } + client := &apiClient{tc} + + for _, test := range tests { + + tc.ch <- test + + _, body, err := client.Do(context.Background(), tc.req) + + if test.err != nil { + if err == nil { + t.Errorf("expected error %q but got none", test.err) + continue + } + if test.err.Error() != err.Error() { + t.Errorf("unexpected error: want %q, got %q", test.err, err) + } + continue + } + if err != nil { + 
t.Errorf("unexpeceted error %s", err) + continue + } + + want, got := test.expected, string(body) + if want != got { + t.Errorf("unexpected body: want %q, got %q", want, got) + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/examples/random/Dockerfile b/vendor/src/github.com/prometheus/client_golang/prometheus/examples/random/Dockerfile new file mode 100644 index 00000000..32b6846e --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/examples/random/Dockerfile @@ -0,0 +1,20 @@ +# This Dockerfile builds an image for a client_golang example. +# +# Use as (from the root for the client_golang repository): +# docker build -f examples/$name/Dockerfile -t prometheus/golang-example-$name . + +# Builder image, where we build the example. +FROM golang:1.9.0 AS builder +WORKDIR /go/src/github.com/prometheus/client_golang +COPY . . +WORKDIR /go/src/github.com/prometheus/client_golang/prometheus +RUN go get -d +WORKDIR /go/src/github.com/prometheus/client_golang/examples/random +RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w' + +# Final image. +FROM scratch +LABEL maintainer "The Prometheus Authors " +COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/random . +EXPOSE 8080 +ENTRYPOINT ["/random"] diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/examples/random/main.go b/vendor/src/github.com/prometheus/client_golang/prometheus/examples/random/main.go new file mode 100644 index 00000000..eef50d20 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/examples/random/main.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A simple example exposing fictional RPC latencies with different types of +// random distributions (uniform, normal, and exponential) as Prometheus +// metrics. +package main + +import ( + "flag" + "log" + "math" + "math/rand" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var ( + addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") + uniformDomain = flag.Float64("uniform.domain", 0.0002, "The domain for the uniform distribution.") + normDomain = flag.Float64("normal.domain", 0.0002, "The domain for the normal distribution.") + normMean = flag.Float64("normal.mean", 0.00001, "The mean for the normal distribution.") + oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.") +) + +var ( + // Create a summary to track fictional interservice RPC latencies for three + // distinct services with different latency distributions. These services are + // differentiated via a "service" label. 
+ rpcDurations = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "rpc_durations_seconds", + Help: "RPC latency distributions.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"service"}, + ) + // The same as above, but now as a histogram, and only for the normal + // distribution. The buckets are targeted to the parameters of the + // normal distribution, with 20 buckets centered on the mean, each + // half-sigma wide. + rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "rpc_durations_histogram_seconds", + Help: "RPC latency distributions.", + Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), + }) +) + +func init() { + // Register the summary and the histogram with Prometheus's default registry. + prometheus.MustRegister(rpcDurations) + prometheus.MustRegister(rpcDurationsHistogram) +} + +func main() { + flag.Parse() + + start := time.Now() + + oscillationFactor := func() float64 { + return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod))) + } + + // Periodically record some sample latencies for the three services. + go func() { + for { + v := rand.Float64() * *uniformDomain + rpcDurations.WithLabelValues("uniform").Observe(v) + time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond) + } + }() + + go func() { + for { + v := (rand.NormFloat64() * *normDomain) + *normMean + rpcDurations.WithLabelValues("normal").Observe(v) + rpcDurationsHistogram.Observe(v) + time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond) + } + }() + + go func() { + for { + v := rand.ExpFloat64() / 1e6 + rpcDurations.WithLabelValues("exponential").Observe(v) + time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond) + } + }() + + // Expose the registered metrics via HTTP. + http.Handle("/metrics", promhttp.Handler()) + log.Fatal(http.ListenAndServe(*addr, nil)) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/examples/simple/Dockerfile b/vendor/src/github.com/prometheus/client_golang/prometheus/examples/simple/Dockerfile new file mode 100644 index 00000000..99b49d78 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/examples/simple/Dockerfile @@ -0,0 +1,20 @@ +# This Dockerfile builds an image for a client_golang example. +# +# Use as (from the root for the client_golang repository): +# docker build -f examples/$name/Dockerfile -t prometheus/golang-example-$name . + +# Builder image, where we build the example. +FROM golang:1.9.0 AS builder +WORKDIR /go/src/github.com/prometheus/client_golang +COPY . . +WORKDIR /go/src/github.com/prometheus/client_golang/prometheus +RUN go get -d +WORKDIR /go/src/github.com/prometheus/client_golang/examples/simple +RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w' + +# Final image. +FROM scratch +LABEL maintainer "The Prometheus Authors " +COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/simple . 
+EXPOSE 8080 +ENTRYPOINT ["/simple"] diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/examples/simple/main.go b/vendor/src/github.com/prometheus/client_golang/prometheus/examples/simple/main.go new file mode 100644 index 00000000..1fc23249 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/examples/simple/main.go @@ -0,0 +1,31 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A minimal example of how to include Prometheus instrumentation. +package main + +import ( + "flag" + "log" + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") + +func main() { + flag.Parse() + http.Handle("/metrics", promhttp.Handler()) + log.Fatal(http.ListenAndServe(*addr, nil)) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/README.md b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/README.md new file mode 100644 index 00000000..44986bff --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/README.md @@ -0,0 +1 @@ +See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/benchmark_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/benchmark_test.go new file mode 100644 index 00000000..4a05721d --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/benchmark_test.go @@ -0,0 +1,199 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "sync" + "testing" +) + +func BenchmarkCounterWithLabelValues(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } +} + +func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + wg := sync.WaitGroup{} + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + for j := 0; j < b.N/10; j++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } + wg.Done() + }() + } + wg.Wait() +} + +func BenchmarkCounterWithMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc() + } +} + +func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + labels := Labels{"two": "zwei", "one": "eins", "three": "drei"} + for i := 0; i < b.N; i++ { + m.With(labels).Inc() + } +} + +func BenchmarkCounterNoLabels(b *testing.B) { + m := NewCounter(CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Inc() + } +} + +func BenchmarkGaugeWithLabelValues(b *testing.B) { + m := NewGaugeVec( + GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Set(3.1415) + } +} + +func BenchmarkGaugeNoLabels(b *testing.B) { + m := NewGauge(GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Set(3.1415) + } +} + +func BenchmarkSummaryWithLabelValues(b *testing.B) { + m := NewSummaryVec( + SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkSummaryNoLabels(b *testing.B) { + m := NewSummary(SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} + +func BenchmarkHistogramWithLabelValues(b *testing.B) { + m := NewHistogramVec( + HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkHistogramNoLabels(b *testing.B) { + m := NewHistogram(HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to 
benchmark it.", + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} + +func BenchmarkParallelCounter(b *testing.B) { + c := NewCounter(CounterOpts{ + Name: "benchmark_counter", + Help: "A Counter to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + c.Inc() + } + }) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/collector.go new file mode 100644 index 00000000..623d3d83 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. Blocking occurs at the expense of total performance of rendering + // all registered metrics. 
Ideally, Collector implementations support + // concurrent readers. + Collect(chan<- Metric) +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/counter.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/counter.go new file mode 100644 index 00000000..765e4550 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/counter.go @@ -0,0 +1,277 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. 
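+//
+// A minimal usage sketch (the metric name is illustrative only, not part of
+// this package):
+//
+//	requests := NewCounter(CounterOpts{
+//		Name: "example_requests_total",
+//		Help: "Total number of example requests handled.",
+//	})
+//	MustRegister(requests)
+//	requests.Inc()    // integer increments use the uint64 fast path
+//	requests.Add(0.5) // non-integer values are tracked in the float64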
+func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair +} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + return populateMetric(CounterValue, val, c.labelPairs, out) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *metricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). 
+// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. 
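+//
+// A brief sketch of currying (metric and label names are illustrative only):
+//
+//	reqs := NewCounterVec(CounterOpts{
+//		Name: "example_http_requests_total",
+//		Help: "HTTP requests, partitioned by handler, method and code.",
+//	}, []string{"handler", "method", "code"})
+//	MustRegister(reqs)
+//
+//	// Pre-set the handler label; the curried vector then only takes
+//	// method and code, in that order.
+//	loginReqs := reqs.MustCurryWith(Labels{"handler": "/login"})
+//	loginReqs.WithLabelValues("GET", "200").Inc()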
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/counter_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/counter_test.go new file mode 100644 index 00000000..5062f51a --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/counter_test.go @@ -0,0 +1,212 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
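+// An illustrative sketch of NewCounterFunc (the variable and metric names are
+// assumptions, not part of this package), mirroring an externally maintained
+// count at collect time:
+//
+//	var handled uint64 // updated elsewhere via atomic.AddUint64
+//	cf := NewCounterFunc(CounterOpts{
+//		Name: "example_requests_handled_total",
+//		Help: "Requests handled, read from an existing atomic counter.",
+//	}, func() float64 {
+//		return float64(atomic.LoadUint64(&handled))
+//	})
+//	MustRegister(cf)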
+ +package prometheus + +import ( + "fmt" + "math" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestCounterAdd(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }).(*counter) + counter.Inc() + if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + if expected, got := uint64(1), counter.valInt; expected != got { + t.Errorf("Expected %d, got %d.", expected, got) + } + counter.Add(42) + if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + if expected, got := uint64(43), counter.valInt; expected != got { + t.Errorf("Expected %d, got %d.", expected, got) + } + + counter.Add(24.42) + if expected, got := 24.42, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + if expected, got := uint64(43), counter.valInt; expected != got { + t.Errorf("Expected %d, got %d.", expected, got) + } + + if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got { + t.Errorf("Expected error %q, got %q.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := `label: label: counter: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func decreaseCounter(c *counter) (err error) { + defer func() { + if e := recover(); e != nil { + err = e.(error) + } + }() + c.Add(-1) + return nil +} + +func TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) { + testCases := []struct { + desc string + labels Labels + }{ + { + desc: "non utf8 label value", + labels: Labels{"a": "\xFF"}, + }, + { + desc: "not enough label values", + labels: Labels{}, + }, + { + desc: "too many label values", + labels: Labels{"a": "1", "b": "2"}, + }, + } + + for _, test := range testCases { + counterVec := NewCounterVec(CounterOpts{ + Name: "test", + }, []string{"a"}) + + labelValues := make([]string, len(test.labels)) + for _, val := range test.labels { + labelValues = append(labelValues, val) + } + + expectPanic(t, func() { + counterVec.WithLabelValues(labelValues...) 
+ }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) + expectPanic(t, func() { + counterVec.With(test.labels) + }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) + + if _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil { + t.Errorf("GetMetricWithLabelValues: expected error because: %s", test.desc) + } + if _, err := counterVec.GetMetricWith(test.labels); err == nil { + t.Errorf("GetMetricWith: expected error because: %s", test.desc) + } + } +} + +func expectPanic(t *testing.T, op func(), errorMsg string) { + defer func() { + if err := recover(); err == nil { + t.Error(errorMsg) + } + }() + + op() +} + +func TestCounterAddInf(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + }).(*counter) + + counter.Inc() + if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + if expected, got := uint64(1), counter.valInt; expected != got { + t.Errorf("Expected %d, got %d.", expected, got) + } + + counter.Add(math.Inf(1)) + if expected, got := math.Inf(1), math.Float64frombits(counter.valBits); expected != got { + t.Errorf("valBits expected %f, got %f.", expected, got) + } + if expected, got := uint64(1), counter.valInt; expected != got { + t.Errorf("valInts expected %d, got %d.", expected, got) + } + + counter.Inc() + if expected, got := math.Inf(1), math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + if expected, got := uint64(2), counter.valInt; expected != got { + t.Errorf("Expected %d, got %d.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := `counter: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func TestCounterAddLarge(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + }).(*counter) + + // large overflows the underlying type and should therefore be stored in valBits. 
+ large := float64(math.MaxUint64 + 1) + counter.Add(large) + if expected, got := large, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("valBits expected %f, got %f.", expected, got) + } + if expected, got := uint64(0), counter.valInt; expected != got { + t.Errorf("valInts expected %d, got %d.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := fmt.Sprintf("counter: ", large), m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func TestCounterAddSmall(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + }).(*counter) + small := 0.000000000001 + counter.Add(small) + if expected, got := small, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("valBits expected %f, got %f.", expected, got) + } + if expected, got := uint64(0), counter.valInt; expected != got { + t.Errorf("valInts expected %d, got %d.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := fmt.Sprintf("counter: ", small), m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/desc.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/desc.go new file mode 100644 index 00000000..4a755b0f --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/desc.go @@ -0,0 +1,188 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. 
+ constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !model.IsValidMetricName(model.LabelValue(fqName)) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... + for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. 
+ sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/desc_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/desc_test.go new file mode 100644 index 00000000..2f962652 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/desc_test.go @@ -0,0 +1,17 @@ +package prometheus + +import ( + "testing" +) + +func TestNewDescInvalidLabelValues(t *testing.T) { + desc := NewDesc( + "sample_label", + "sample label", + nil, + Labels{"a": "\xFF"}, + ) + if desc.err == nil { + t.Errorf("NewDesc: expected error because: %s", desc.err) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/doc.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/doc.go new file mode 100644 index 00000000..83c3657d --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/doc.go @@ -0,0 +1,191 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. 
+// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. However, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. 
At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegisterer comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. 
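+//
+// As a minimal sketch (myCollector stands in for any Collector you have
+// created; exposition itself is covered in the next section), a custom
+// registry can be used like this:
+//
+//    reg := prometheus.NewRegistry()
+//    reg.MustRegister(myCollector)
+//    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))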
+// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// (The top-level functions in the prometheus package are deprecated.) +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. +package prometheus diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_clustermanager_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_clustermanager_test.go new file mode 100644 index 00000000..260c1b52 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_clustermanager_test.go @@ -0,0 +1,118 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import "github.com/prometheus/client_golang/prometheus" + +// ClusterManager is an example for a system that might have been built without +// Prometheus in mind. It models a central manager of jobs running in a +// cluster. To turn it into something that collects Prometheus metrics, we +// simply add the two methods required for the Collector interface. +// +// An additional challenge is that multiple instances of the ClusterManager are +// run within the same binary, each in charge of a different zone. We need to +// make use of ConstLabels to be able to register each ClusterManager instance +// with Prometheus. +type ClusterManager struct { + Zone string + OOMCountDesc *prometheus.Desc + RAMUsageDesc *prometheus.Desc + // ... many more fields +} + +// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a +// real cluster manager would have to do. Since it may actually be really +// expensive, it must only be called once per collection. This implementation, +// obviously, only returns some made-up data. +func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( + oomCountByHost map[string]int, ramUsageByHost map[string]float64, +) { + // Just example fake data. + oomCountByHost = map[string]int{ + "foo.example.org": 42, + "bar.example.org": 2001, + } + ramUsageByHost = map[string]float64{ + "foo.example.org": 6.023e23, + "bar.example.org": 3.14, + } + return +} + +// Describe simply sends the two Descs in the struct to the channel. 
+func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { + ch <- c.OOMCountDesc + ch <- c.RAMUsageDesc +} + +// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it +// creates constant metrics for each host on the fly based on the returned data. +// +// Note that Collect could be called concurrently, so we depend on +// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe. +func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { + oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() + for host, oomCount := range oomCountByHost { + ch <- prometheus.MustNewConstMetric( + c.OOMCountDesc, + prometheus.CounterValue, + float64(oomCount), + host, + ) + } + for host, ramUsage := range ramUsageByHost { + ch <- prometheus.MustNewConstMetric( + c.RAMUsageDesc, + prometheus.GaugeValue, + ramUsage, + host, + ) + } +} + +// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note +// that the zone is set as a ConstLabel. (It's different in each instance of the +// ClusterManager, but constant over the lifetime of an instance.) Then there is +// a variable label "host", since we want to partition the collected metrics by +// host. Since all Descs created in this way are consistent across instances, +// with a guaranteed distinction by the "zone" label, we can register different +// ClusterManager instances with the same registry. +func NewClusterManager(zone string) *ClusterManager { + return &ClusterManager{ + Zone: zone, + OOMCountDesc: prometheus.NewDesc( + "clustermanager_oom_crashes_total", + "Number of OOM crashes.", + []string{"host"}, + prometheus.Labels{"zone": zone}, + ), + RAMUsageDesc: prometheus.NewDesc( + "clustermanager_ram_usage_bytes", + "RAM usage as reported to the cluster manager.", + []string{"host"}, + prometheus.Labels{"zone": zone}, + ), + } +} + +func ExampleCollector() { + workerDB := NewClusterManager("db") + workerCA := NewClusterManager("ca") + + // Since we are dealing with custom Collector implementations, it might + // be a good idea to try it out with a pedantic registry. + reg := prometheus.NewPedanticRegistry() + reg.MustRegister(workerDB) + reg.MustRegister(workerCA) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_timer_complex_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_timer_complex_test.go new file mode 100644 index 00000000..c5e7de5e --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_timer_complex_test.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // apiRequestDuration tracks the duration separate for each HTTP status + // class (1xx, 2xx, ...). This creates a fair amount of time series on + // the Prometheus server. 
Usually, you would track the duration of + // serving HTTP request without partitioning by outcome. Do something + // like this only if needed. Also note how only status classes are + // tracked, not every single status code. The latter would create an + // even larger amount of time series. Request counters partitioned by + // status code are usually OK as each counter only creates one time + // series. Histograms are way more expensive, so partition with care and + // only where you really need separate latency tracking. Partitioning by + // status class is only an example. In concrete cases, other partitions + // might make more sense. + apiRequestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "api_request_duration_seconds", + Help: "Histogram for the request duration of the public API, partitioned by status class.", + Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5), + }, + []string{"status_class"}, + ) +) + +func handler(w http.ResponseWriter, r *http.Request) { + status := http.StatusOK + // The ObserverFunc gets called by the deferred ObserveDuration and + // decides which Histogram's Observe method is called. + timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { + switch { + case status >= 500: // Server error. + apiRequestDuration.WithLabelValues("5xx").Observe(v) + case status >= 400: // Client error. + apiRequestDuration.WithLabelValues("4xx").Observe(v) + case status >= 300: // Redirection. + apiRequestDuration.WithLabelValues("3xx").Observe(v) + case status >= 200: // Success. + apiRequestDuration.WithLabelValues("2xx").Observe(v) + default: // Informational. + apiRequestDuration.WithLabelValues("1xx").Observe(v) + } + })) + defer timer.ObserveDuration() + + // Handle the request. Set status accordingly. + // ... +} + +func ExampleTimer_complex() { + http.HandleFunc("/api", handler) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_timer_gauge_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_timer_gauge_test.go new file mode 100644 index 00000000..7184a0d1 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_timer_gauge_test.go @@ -0,0 +1,48 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "os" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // If a function is called rarely (i.e. not more often than scrapes + // happen) or ideally only once (like in a batch job), it can make sense + // to use a Gauge for timing the function call. For timing a batch job + // and pushing the result to a Pushgateway, see also the comprehensive + // example in the push package. 
+ funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "example_function_duration_seconds", + Help: "Duration of the last call of an example function.", + }) +) + +func run() error { + // The Set method of the Gauge is used to observe the duration. + timer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set)) + defer timer.ObserveDuration() + + // Do something. Return errors as encountered. The use of 'defer' above + // makes sure the function is still timed properly. + return nil +} + +func ExampleTimer_gauge() { + if err := run(); err != nil { + os.Exit(1) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_timer_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_timer_test.go new file mode 100644 index 00000000..bd86bb47 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/example_timer_test.go @@ -0,0 +1,40 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "math/rand" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "example_request_duration_seconds", + Help: "Histogram for the runtime of a simple example function.", + Buckets: prometheus.LinearBuckets(0.01, 0.01, 10), + }) +) + +func ExampleTimer() { + // timer times this example function. It uses a Histogram, but a Summary + // would also work, as both implement Observer. Check out + // https://prometheus.io/docs/practices/histograms/ for differences. + timer := prometheus.NewTimer(requestDuration) + defer timer.ObserveDuration() + + // Do something here that takes time. + time.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/examples_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/examples_test.go new file mode 100644 index 00000000..45f60650 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/examples_test.go @@ -0,0 +1,754 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus_test + +import ( + "bytes" + "fmt" + "math" + "net/http" + "runtime" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + + "github.com/golang/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleGauge() { + opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed.", + }) + prometheus.MustRegister(opsQueued) + + // 10 operations queued by the goroutine managing incoming requests. + opsQueued.Add(10) + // A worker goroutine has picked up a waiting operation. + opsQueued.Dec() + // And once more... + opsQueued.Dec() +} + +func ExampleGaugeVec() { + opsQueued := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", + }, + []string{ + // Which user has requested the operation? + "user", + // Of what type is the operation? + "type", + }, + ) + prometheus.MustRegister(opsQueued) + + // Increase a value using compact (but order-sensitive!) WithLabelValues(). + opsQueued.WithLabelValues("bob", "put").Add(4) + // Increase a value with a map using WithLabels. More verbose, but order + // doesn't matter anymore. + opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc() +} + +func ExampleGaugeFunc() { + if err := prometheus.Register(prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Subsystem: "runtime", + Name: "goroutines_count", + Help: "Number of goroutines that currently exist.", + }, + func() float64 { return float64(runtime.NumGoroutine()) }, + )); err == nil { + fmt.Println("GaugeFunc 'goroutines_count' registered.") + } + // Note that the count of goroutines is a gauge (and not a counter) as + // it can go up and down. + + // Output: + // GaugeFunc 'goroutines_count' registered. +} + +func ExampleCounter() { + pushCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", // Note: No help string... + }) + err := prometheus.Register(pushCounter) // ... so this will return an error. + if err != nil { + fmt.Println("Push counter couldn't be registered, no counting will happen:", err) + return + } + + // Try it once more, this time with a help string. + pushCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", + Help: "Number of pushes to external repository.", + }) + err = prometheus.Register(pushCounter) + if err != nil { + fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err) + return + } + + pushComplete := make(chan struct{}) + // TODO: Start a goroutine that performs repository pushes and reports + // each completion via the channel. 
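+	// (In this example the loop below is never reached, because the failed
+	// registration above already caused an early return; it only illustrates
+	// how completed pushes would be counted.)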
+ for range pushComplete { + pushCounter.Inc() + } + // Output: + // Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string +} + +func ExampleCounterVec() { + httpReqs := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", + }, + []string{"code", "method"}, + ) + prometheus.MustRegister(httpReqs) + + httpReqs.WithLabelValues("404", "POST").Add(42) + + // If you have to access the same set of labels very frequently, it + // might be good to retrieve the metric only once and keep a handle to + // it. But beware of deletion of that metric, see below! + m := httpReqs.WithLabelValues("200", "GET") + for i := 0; i < 1000000; i++ { + m.Inc() + } + // Delete a metric from the vector. If you have previously kept a handle + // to that metric (as above), future updates via that handle will go + // unseen (even if you re-create a metric with the same label set + // later). + httpReqs.DeleteLabelValues("200", "GET") + // Same thing with the more verbose Labels syntax. + httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) +} + +func ExampleInstrumentHandler() { + // Handle the "/doc" endpoint with the standard http.FileServer handler. + // By wrapping the handler with InstrumentHandler, request count, + // request and response sizes, and request latency are automatically + // exported to Prometheus, partitioned by HTTP status code and method + // and by the handler name (here "fileserver"). + http.Handle("/doc", prometheus.InstrumentHandler( + "fileserver", http.FileServer(http.Dir("/usr/share/doc")), + )) + // The Prometheus handler still has to be registered to handle the + // "/metrics" endpoint. The handler returned by prometheus.Handler() is + // already instrumented - with "prometheus" as the handler name. In this + // example, we want the handler name to be "metrics", so we instrument + // the uninstrumented Prometheus handler ourselves. + http.Handle("/metrics", prometheus.InstrumentHandler( + "metrics", prometheus.UninstrumentedHandler(), + )) +} + +func ExampleLabelPairSorter() { + labelPairs := []*dto.LabelPair{ + {Name: proto.String("status"), Value: proto.String("404")}, + {Name: proto.String("method"), Value: proto.String("get")}, + } + + sort.Sort(prometheus.LabelPairSorter(labelPairs)) + + fmt.Println(labelPairs) + // Output: + // [name:"method" value:"get" name:"status" value:"404" ] +} + +func ExampleRegister() { + // Imagine you have a worker pool and want to count the tasks completed. + taskCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }) + // This will register fine. + if err := prometheus.Register(taskCounter); err != nil { + fmt.Println(err) + } else { + fmt.Println("taskCounter registered.") + } + // Don't forget to tell the HTTP server about the Prometheus handler. + // (In a real program, you still need to start the HTTP server...) + http.Handle("/metrics", prometheus.Handler()) + + // Now you can start workers and give every one of them a pointer to + // taskCounter and let it increment it whenever it completes a task. + taskCounter.Inc() // This has to happen somewhere in the worker code. + + // But wait, you want to see how individual workers perform. 
So you need + // a vector of counters, with one element for each worker. + taskCounterVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + + // Registering will fail because we already have a metric of that name. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + + // To fix, first unregister the old taskCounter. + if prometheus.Unregister(taskCounter) { + fmt.Println("taskCounter unregistered.") + } + + // Try registering taskCounterVec again. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Bummer! Still doesn't work. + + // Prometheus will not allow you to ever export metrics with + // inconsistent help strings or label names. After unregistering, the + // unregistered metrics will cease to show up in the /metrics HTTP + // response, but the registry still remembers that those metrics had + // been exported before. For this example, we will now choose a + // different name. (In a real program, you would obviously not export + // the obsolete metric in the first place.) + taskCounterVec = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_by_id", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Finally it worked! + + // The workers have to tell taskCounterVec their id to increment the + // right element in the metric vector. + taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42. + + // Each worker could also keep a reference to their own counter element + // around. Pick the counter at initialization time of the worker. + myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code. + myCounter.Inc() // Somewhere in the code of that worker. + + // Note that something like WithLabelValues("42", "spurious arg") would + // panic (because you have provided too many label values). If you want + // to get an error instead, use GetMetricWithLabelValues(...) instead. + notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg") + if err != nil { + fmt.Println("Worker initialization failed:", err) + } + if notMyCounter == nil { + fmt.Println("notMyCounter is nil.") + } + + // A different (and somewhat tricky) approach is to use + // ConstLabels. ConstLabels are pairs of label names and label values + // that never change. You might ask what those labels are good for (and + // rightfully so - if they never change, they could as well be part of + // the metric name). There are essentially two use-cases: The first is + // if labels are constant throughout the lifetime of a binary execution, + // but they vary over time or between different instances of a running + // binary. The second is what we have here: Each worker creates and + // registers an own Counter instance where the only difference is in the + // value of the ConstLabels. 
Those Counters can all be registered + // because the different ConstLabel values guarantee that each worker + // will increment a different Counter metric. + counterOpts := prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks", + Help: "Total number of tasks completed.", + ConstLabels: prometheus.Labels{"worker_id": "42"}, + } + taskCounterForWorker42 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker42); err != nil { + fmt.Println("taskCounterVForWorker42 not registered:", err) + } else { + fmt.Println("taskCounterForWorker42 registered.") + } + // Obviously, in real code, taskCounterForWorker42 would be a member + // variable of a worker struct, and the "42" would be retrieved with a + // GetId() method or something. The Counter would be created and + // registered in the initialization code of the worker. + + // For the creation of the next Counter, we can recycle + // counterOpts. Just change the ConstLabels. + counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"} + taskCounterForWorker2001 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker2001); err != nil { + fmt.Println("taskCounterVForWorker2001 not registered:", err) + } else { + fmt.Println("taskCounterForWorker2001 registered.") + } + + taskCounterForWorker2001.Inc() + taskCounterForWorker42.Inc() + taskCounterForWorker2001.Inc() + + // Yet another approach would be to turn the workers themselves into + // Collectors and register them. See the Collector example for details. + + // Output: + // taskCounter registered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounter unregistered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounterVec registered. + // Worker initialization failed: inconsistent label cardinality + // notMyCounter is nil. + // taskCounterForWorker42 registered. + // taskCounterForWorker2001 registered. +} + +func ExampleSummary() { + temps := prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
+ metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > +} + +func ExampleSummaryVec() { + temps := prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"species"}, + ) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10) + } + + // Create a Summary without any observations. + temps.WithLabelValues("leiopelma-hochstetteri") + + // Just for demonstration, let's check the state of the summary vector + // by registering it with a custom registry and then let it collect the + // metrics. + reg := prometheus.NewRegistry() + reg.MustRegister(temps) + + metricFamilies, err := reg.Gather() + if err != nil || len(metricFamilies) != 1 { + panic("unexpected behavior of custom test registry") + } + fmt.Println(proto.MarshalTextString(metricFamilies[0])) + + // Output: + // name: "pond_temperature_celsius" + // help: "The temperature of the frog pond." + // type: SUMMARY + // metric: < + // label: < + // name: "species" + // value: "leiopelma-hochstetteri" + // > + // summary: < + // sample_count: 0 + // sample_sum: 0 + // quantile: < + // quantile: 0.5 + // value: nan + // > + // quantile: < + // quantile: 0.9 + // value: nan + // > + // quantile: < + // quantile: 0.99 + // value: nan + // > + // > + // > + // metric: < + // label: < + // name: "species" + // value: "lithobates-catesbeianus" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 31956.100000000017 + // quantile: < + // quantile: 0.5 + // value: 32.4 + // > + // quantile: < + // quantile: 0.9 + // value: 41.4 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // > + // metric: < + // label: < + // name: "species" + // value: "litoria-caerulea" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // > +} + +func ExampleNewConstSummary() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A summary of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant summary from values we got from a 3rd party telemetry system. + s := prometheus.MustNewConstSummary( + desc, + 4711, 403.34, + map[float64]float64{0.5: 42.3, 0.9: 323.3}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
+ metric := &dto.Metric{} + s.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // summary: < + // sample_count: 4711 + // sample_sum: 403.34 + // quantile: < + // quantile: 0.5 + // value: 42.3 + // > + // quantile: < + // quantile: 0.9 + // value: 323.3 + // > + // > +} + +func ExampleHistogram() { + temps := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide. + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // histogram: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // bucket: < + // cumulative_count: 192 + // upper_bound: 20 + // > + // bucket: < + // cumulative_count: 366 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 501 + // upper_bound: 30 + // > + // bucket: < + // cumulative_count: 638 + // upper_bound: 35 + // > + // bucket: < + // cumulative_count: 816 + // upper_bound: 40 + // > + // > +} + +func ExampleNewConstHistogram() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A histogram of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant histogram from values we got from a 3rd party telemetry system. + h := prometheus.MustNewConstHistogram( + desc, + 4711, 403.34, + map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + h.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // histogram: < + // sample_count: 4711 + // sample_sum: 403.34 + // bucket: < + // cumulative_count: 121 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 2403 + // upper_bound: 50 + // > + // bucket: < + // cumulative_count: 3221 + // upper_bound: 100 + // > + // bucket: < + // cumulative_count: 4233 + // upper_bound: 200 + // > + // > +} + +func ExampleAlreadyRegisteredError() { + reqCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "requests_total", + Help: "The total number of requests served.", + }) + if err := prometheus.Register(reqCounter); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + // A counter for that metric has been registered before. + // Use the old counter from now on. + reqCounter = are.ExistingCollector.(prometheus.Counter) + } else { + // Something else went wrong! 
+ panic(err) + } + } + reqCounter.Inc() +} + +func ExampleGatherers() { + reg := prometheus.NewRegistry() + temp := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "temperature_kelvin", + Help: "Temperature in Kelvin.", + }, + []string{"location"}, + ) + reg.MustRegister(temp) + temp.WithLabelValues("outside").Set(273.14) + temp.WithLabelValues("inside").Set(298.44) + + var parser expfmt.TextParser + + text := ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. +temperature_kelvin{location="somewhere else"} 4.5 +` + + parseText := func() ([]*dto.MetricFamily, error) { + parsed, err := parser.TextToMetricFamilies(strings.NewReader(text)) + if err != nil { + return nil, err + } + var result []*dto.MetricFamily + for _, mf := range parsed { + result = append(result, mf) + } + return result, nil + } + + gatherers := prometheus.Gatherers{ + reg, + prometheus.GathererFunc(parseText), + } + + gathering, err := gatherers.Gather() + if err != nil { + fmt.Println(err) + } + + out := &bytes.Buffer{} + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + fmt.Println("----------") + + // Note how the temperature_kelvin metric family has been merged from + // different sources. Now try + text = ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. +# Duplicate metric: +temperature_kelvin{location="outside"} 265.3 + # Wrong labels: +temperature_kelvin 4.5 +` + + gathering, err = gatherers.Gather() + if err != nil { + fmt.Println(err) + } + // Note that still as many metrics as possible are returned: + out.Reset() + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + + // Output: + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. + // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 + // temperature_kelvin{location="somewhere else"} 4.5 + // ---------- + // 2 error(s) occurred: + // * collected metric temperature_kelvin label: gauge: was collected before with the same name and label values + // * collected metric temperature_kelvin gauge: has label dimensions inconsistent with previously collected metrics in the same metric family + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. 
+ // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/expvar_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/expvar_collector.go new file mode 100644 index 00000000..18a99d5f --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "encoding/json" + "expvar" +) + +type expvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated expvar Collector that still has +// to be registered with a Prometheus registry. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. 
+func (e *expvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *expvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) + } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/expvar_collector_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/expvar_collector_test.go new file mode 100644 index 00000000..6bcd9b69 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/expvar_collector_test.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "expvar" + "fmt" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleNewExpvarCollector() { + expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ + "memstats": prometheus.NewDesc( + "expvar_memstats", + "All numeric memstats as one metric family. Not a good role-model, actually... ;-)", + []string{"type"}, nil, + ), + "lone-int": prometheus.NewDesc( + "expvar_lone_int", + "Just an expvar int as an example.", + nil, nil, + ), + "http-request-map": prometheus.NewDesc( + "expvar_http_request_total", + "How many http requests processed, partitioned by status code and http method.", + []string{"code", "method"}, nil, + ), + }) + prometheus.MustRegister(expvarCollector) + + // The Prometheus part is done here. But to show that this example is + // doing anything, we have to manually export something via expvar. In + // real-life use-cases, some library would already have exported via + // expvar what we want to re-export as Prometheus metrics. 
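+	// ("lone-int" matches the zero-label Desc above, while the nested
+	// "http-request-map" provides the "code" and "method" label values.)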
+ expvar.NewInt("lone-int").Set(42) + expvarMap := expvar.NewMap("http-request-map") + var ( + expvarMap1, expvarMap2 expvar.Map + expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int + ) + expvarMap1.Init() + expvarMap2.Init() + expvarInt11.Set(3) + expvarInt12.Set(13) + expvarInt21.Set(11) + expvarInt22.Set(212) + expvarMap1.Set("POST", &expvarInt11) + expvarMap1.Set("GET", &expvarInt12) + expvarMap2.Set("POST", &expvarInt21) + expvarMap2.Set("GET", &expvarInt22) + expvarMap.Set("404", &expvarMap1) + expvarMap.Set("200", &expvarMap2) + // Results in the following expvar map: + // "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}} + + // Let's see what the scrape would yield, but exclude the memstats metrics. + metricStrings := []string{} + metric := dto.Metric{} + metricChan := make(chan prometheus.Metric) + go func() { + expvarCollector.Collect(metricChan) + close(metricChan) + }() + for m := range metricChan { + if !strings.Contains(m.Desc().String(), "expvar_memstats") { + metric.Reset() + m.Write(&metric) + metricStrings = append(metricStrings, metric.String()) + } + } + sort.Strings(metricStrings) + for _, s := range metricStrings { + fmt.Println(strings.TrimRight(s, " ")) + } + // Output: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // untyped: +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/fnv.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/fnv.go new file mode 100644 index 00000000..e3b67df8 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/fnv.go @@ -0,0 +1,29 @@ +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/gauge.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/gauge.go new file mode 100644 index 00000000..17c72d7e --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/gauge.go @@ -0,0 +1,286 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. 
+// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. + Inc() + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. + Dec() + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *metricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. 
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/gauge_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/gauge_test.go new file mode 100644 index 00000000..a2e3c141 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/gauge_test.go @@ -0,0 +1,202 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
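+
+// An illustrative sketch of the CurryWith/MustCurryWith API documented in
+// gauge.go above (all names are made up):
+//
+//    temps := prometheus.NewGaugeVec(
+//        prometheus.GaugeOpts{Name: "temperature_celsius", Help: "Temperature by zone and sensor."},
+//        []string{"zone", "sensor"},
+//    )
+//    zoneA := temps.MustCurryWith(prometheus.Labels{"zone": "a"})
+//    // Equivalent to temps.WithLabelValues("a", "s1").Set(21.5):
+//    zoneA.WithLabelValues("s1").Set(21.5)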
+ +package prometheus + +import ( + "math" + "math/rand" + "sync" + "testing" + "testing/quick" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func listenGaugeStream(vals, result chan float64, done chan struct{}) { + var sum float64 +outer: + for { + select { + case <-done: + close(vals) + for v := range vals { + sum += v + } + break outer + case v := <-vals: + sum += v + } + } + result <- sum + close(result) +} + +func TestGaugeConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStream := make(chan float64, mutations*concLevel) + result := make(chan float64) + done := make(chan struct{}) + + go listenGaugeStream(sStream, result, done) + go func() { + end.Wait() + close(done) + }() + + gge := NewGauge(GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sStream <- v + gge.Add(v) + } + end.Done() + }(vals) + } + start.Done() + + if expected, got := <-result, math.Float64frombits(gge.(*gauge).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. %f, got %f", expected, got) + return false + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeVecConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + vecLength := int(n%5 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStreams := make([]chan float64, vecLength) + results := make([]chan float64, vecLength) + done := make(chan struct{}) + + for i := 0; i < vecLength; i++ { + sStreams[i] = make(chan float64, mutations*concLevel) + results[i] = make(chan float64) + go listenGaugeStream(sStreams[i], results[i], done) + } + + go func() { + end.Wait() + close(done) + }() + + gge := NewGaugeVec( + GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }, + []string{"label"}, + ) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + pick := make([]int, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + pick[j] = rand.Intn(vecLength) + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sStreams[pick[i]] <- v + gge.WithLabelValues(string('A' + pick[i])).Add(v) + } + end.Done() + }(vals) + } + start.Done() + + for i := range sStreams { + if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*gauge).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. 
%f, got %f", expected, got) + return false + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeFunc(t *testing.T) { + gf := NewGaugeFunc( + GaugeOpts{ + Name: "test_name", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }, + func() float64 { return 3.1415 }, + ) + + if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } + + m := &dto.Metric{} + gf.Write(m) + + if expected, got := `label: label: gauge: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func TestGaugeSetCurrentTime(t *testing.T) { + g := NewGauge(GaugeOpts{ + Name: "test_name", + Help: "test help", + }) + g.SetToCurrentTime() + unixTime := float64(time.Now().Unix()) + + m := &dto.Metric{} + g.Write(m) + + delta := unixTime - m.GetGauge().GetValue() + // This is just a smoke test to make sure SetToCurrentTime is not + // totally off. Tests with current time involved are hard... + if math.Abs(delta) > 5 { + t.Errorf("Gauge set to current time deviates from current time by more than 5s, delta is %f seconds", delta) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/go_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/go_collector.go new file mode 100644 index 00000000..0440bd12 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/go_collector.go @@ -0,0 +1,288 @@ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This causes a stop-the-world, which is very short with Go1.9+ +// (~25µs). However, with older Go versions, the stop-the-world duration depends +// on the heap size and can be quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). 
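In this revision of the library the default registry normally registers this collector on init, so an explicit registration is mainly needed when a separate Registry is used. A hedged sketch of that case (the function name is invented):

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    // newMetricsHandler serves Go runtime metrics from a private registry
    // instead of the default one.
    func newMetricsHandler() http.Handler {
        reg := prometheus.NewRegistry()
        reg.MustRegister(prometheus.NewGoCollector())
        return promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
    }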
+func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms 
*runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.goInfoDesc + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. 
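goCollector shows the standard two-method shape of a Collector: Describe (above) announces the descriptors, and Collect (just below) emits freshly built metrics at scrape time. The same pattern for a hypothetical application-level metric might look like this (sketch only, not part of the vendored file):

    // queueLengthCollector reads per-queue lengths at scrape time and wraps
    // them in throw-away const metrics, as the memstats metrics are below.
    type queueLengthCollector struct {
        desc    *prometheus.Desc       // assumed built with NewDesc(name, help, []string{"queue"}, nil)
        lengths func() map[string]int  // hypothetical source of per-queue lengths
    }

    func (c *queueLengthCollector) Describe(ch chan<- *prometheus.Desc) {
        ch <- c.desc
    }

    func (c *queueLengthCollector) Collect(ch chan<- prometheus.Metric) {
        for queue, n := range c.lengths() {
            ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(n), queue)
        }
    }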
+func (c *goCollector) Collect(ch chan<- Metric) { + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/go_collector_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/go_collector_test.go new file mode 100644 index 00000000..72264da9 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/go_collector_test.go @@ -0,0 +1,123 @@ +package prometheus + +import ( + "runtime" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func TestGoCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + old = -1 + ) + defer close(closec) + + go func() { + c.Collect(ch) + go func(c <-chan struct{}) { + <-c + }(closec) + <-waitc + c.Collect(ch) + }() + + for { + select { + case m := <-ch: + // m can be Gauge or Counter, + // currently just test the go_goroutines Gauge + // and ignore others. + if m.Desc().fqName != "go_goroutines" { + continue + } + pb := &dto.Metric{} + m.Write(pb) + if pb.GetGauge() == nil { + continue + } + + if old == -1 { + old = int(pb.GetGauge().GetValue()) + close(waitc) + continue + } + + if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 { + // TODO: This is flaky in highly concurrent situations. + t.Errorf("want 1 new goroutine, got %d", diff) + } + + // GoCollector performs three sends per call. + // On line 27 we need to receive three more sends + // to shut down cleanly. 
+ <-ch + <-ch + <-ch + return + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} + +func TestGCCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + oldGC uint64 + oldPause float64 + ) + defer close(closec) + + go func() { + c.Collect(ch) + // force GC + runtime.GC() + <-waitc + c.Collect(ch) + }() + + first := true + for { + select { + case metric := <-ch: + pb := &dto.Metric{} + metric.Write(pb) + if pb.GetSummary() == nil { + continue + } + if len(pb.GetSummary().Quantile) != 5 { + t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile)) + } + for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} { + if *pb.GetSummary().Quantile[idx].Quantile != want { + t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want) + } + } + if first { + first = false + oldGC = *pb.GetSummary().SampleCount + oldPause = *pb.GetSummary().SampleSum + close(waitc) + continue + } + if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 { + t.Errorf("want 1 new garbage collection run, got %d", diff) + } + if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 { + t.Errorf("want moar pause, got %f", diff) + } + return + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/graphite/bridge.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/graphite/bridge.go new file mode 100644 index 00000000..11533374 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/graphite/bridge.go @@ -0,0 +1,280 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package graphite provides a bridge to push Prometheus metrics to a Graphite +// server. +package graphite + +import ( + "bufio" + "errors" + "fmt" + "io" + "net" + "sort" + "time" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + "golang.org/x/net/context" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + defaultInterval = 15 * time.Second + millisecondsPerSecond = 1000 +) + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Ignore errors and try to push as many metrics to Graphite as possible. + ContinueOnError HandlerErrorHandling = iota + + // Abort the push to Graphite upon the first error encountered. + AbortOnError +) + +// Config defines the Graphite bridge config. +type Config struct { + // The url to push data to. Required. + URL string + + // The prefix for the pushed Graphite metrics. Defaults to empty string. 
+ Prefix string + + // The interval to use for pushing data to Graphite. Defaults to 15 seconds. + Interval time.Duration + + // The timeout for pushing metrics to Graphite. Defaults to 15 seconds. + Timeout time.Duration + + // The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer. + Gatherer prometheus.Gatherer + + // The logger that messages are written to. Defaults to no logging. + Logger Logger + + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided Logger + // is not nil. + ErrorHandling HandlerErrorHandling +} + +// Bridge pushes metrics to the configured Graphite server. +type Bridge struct { + url string + prefix string + interval time.Duration + timeout time.Duration + + errorHandling HandlerErrorHandling + logger Logger + + g prometheus.Gatherer +} + +// Logger is the minimal interface Bridge needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// NewBridge returns a pointer to a new Bridge struct. +func NewBridge(c *Config) (*Bridge, error) { + b := &Bridge{} + + if c.URL == "" { + return nil, errors.New("missing URL") + } + b.url = c.URL + + if c.Gatherer == nil { + b.g = prometheus.DefaultGatherer + } else { + b.g = c.Gatherer + } + + if c.Logger != nil { + b.logger = c.Logger + } + + if c.Prefix != "" { + b.prefix = c.Prefix + } + + var z time.Duration + if c.Interval == z { + b.interval = defaultInterval + } else { + b.interval = c.Interval + } + + if c.Timeout == z { + b.timeout = defaultInterval + } else { + b.timeout = c.Timeout + } + + b.errorHandling = c.ErrorHandling + + return b, nil +} + +// Run starts the event loop that pushes Prometheus metrics to Graphite at the +// configured interval. +func (b *Bridge) Run(ctx context.Context) { + ticker := time.NewTicker(b.interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + if err := b.Push(); err != nil && b.logger != nil { + b.logger.Println("error pushing to Graphite:", err) + } + case <-ctx.Done(): + return + } + } +} + +// Push pushes Prometheus metrics to the configured Graphite server. +func (b *Bridge) Push() error { + mfs, err := b.g.Gather() + if err != nil || len(mfs) == 0 { + switch b.errorHandling { + case AbortOnError: + return err + case ContinueOnError: + if b.logger != nil { + b.logger.Println("continue on error:", err) + } + default: + panic("unrecognized error handling value") + } + } + + conn, err := net.DialTimeout("tcp", b.url, b.timeout) + if err != nil { + return err + } + defer conn.Close() + + return writeMetrics(conn, mfs, b.prefix, model.Now()) +} + +func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error { + vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{ + Timestamp: now, + }, mfs...) 
+ if err != nil { + return err + } + + buf := bufio.NewWriter(w) + for _, s := range vec { + if err := writeSanitized(buf, prefix); err != nil { + return err + } + if err := buf.WriteByte('.'); err != nil { + return err + } + if err := writeMetric(buf, s.Metric); err != nil { + return err + } + if _, err := fmt.Fprintf(buf, " %g %d\n", s.Value, int64(s.Timestamp)/millisecondsPerSecond); err != nil { + return err + } + if err := buf.Flush(); err != nil { + return err + } + } + + return nil +} + +func writeMetric(buf *bufio.Writer, m model.Metric) error { + metricName, hasName := m[model.MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != model.MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value))) + } + } + + var err error + switch numLabels { + case 0: + if hasName { + return writeSanitized(buf, string(metricName)) + } + default: + sort.Strings(labelStrings) + if err = writeSanitized(buf, string(metricName)); err != nil { + return err + } + for _, s := range labelStrings { + if err = buf.WriteByte('.'); err != nil { + return err + } + if err = writeSanitized(buf, s); err != nil { + return err + } + } + } + return nil +} + +func writeSanitized(buf *bufio.Writer, s string) error { + prevUnderscore := false + + for _, c := range s { + c = replaceInvalidRune(c) + if c == '_' { + if prevUnderscore { + continue + } + prevUnderscore = true + } else { + prevUnderscore = false + } + if _, err := buf.WriteRune(c); err != nil { + return err + } + } + + return nil +} + +func replaceInvalidRune(c rune) rune { + if c == ' ' { + return '.' + } + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || (c >= '0' && c <= '9')) { + return '_' + } + return c +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/graphite/bridge_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/graphite/bridge_test.go new file mode 100644 index 00000000..c2b274c6 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/graphite/bridge_test.go @@ -0,0 +1,309 @@ +package graphite + +import ( + "bufio" + "bytes" + "io" + "log" + "net" + "os" + "regexp" + "testing" + "time" + + "github.com/prometheus/common/model" + "golang.org/x/net/context" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestSanitize(t *testing.T) { + testCases := []struct { + in, out string + }{ + {in: "hello", out: "hello"}, + {in: "hE/l1o", out: "hE_l1o"}, + {in: "he,*ll(.o", out: "he_ll_o"}, + {in: "hello_there%^&", out: "hello_there_"}, + } + + var buf bytes.Buffer + w := bufio.NewWriter(&buf) + + for i, tc := range testCases { + if err := writeSanitized(w, tc.in); err != nil { + t.Fatalf("write failed: %v", err) + } + if err := w.Flush(); err != nil { + t.Fatalf("flush failed: %v", err) + } + + if want, got := tc.out, buf.String(); want != got { + t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want) + } + + buf.Reset() + } +} + +func TestWriteSummary(t *testing.T) { + sumVec := prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"labelname"}, + ) + + sumVec.WithLabelValues("val1").Observe(float64(10)) + 
sumVec.WithLabelValues("val1").Observe(float64(20)) + sumVec.WithLabelValues("val1").Observe(float64(30)) + sumVec.WithLabelValues("val2").Observe(float64(20)) + sumVec.WithLabelValues("val2").Observe(float64(30)) + sumVec.WithLabelValues("val2").Observe(float64(40)) + + reg := prometheus.NewRegistry() + reg.MustRegister(sumVec) + + mfs, err := reg.Gather() + if err != nil { + t.Fatalf("error: %v", err) + } + + now := model.Time(1477043083) + var buf bytes.Buffer + err = writeMetrics(&buf, mfs, "prefix", now) + if err != nil { + t.Fatalf("error: %v", err) + } + + want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043 +prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043 +prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043 +prefix.name_sum.constname.constvalue.labelname.val1 60 1477043 +prefix.name_count.constname.constvalue.labelname.val1 3 1477043 +prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043 +prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043 +prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043 +prefix.name_sum.constname.constvalue.labelname.val2 90 1477043 +prefix.name_count.constname.constvalue.labelname.val2 3 1477043 +` + + if got := buf.String(); want != got { + t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) + } +} + +func TestWriteHistogram(t *testing.T) { + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + Buckets: []float64{0.01, 0.02, 0.05, 0.1}, + }, + []string{"labelname"}, + ) + + histVec.WithLabelValues("val1").Observe(float64(10)) + histVec.WithLabelValues("val1").Observe(float64(20)) + histVec.WithLabelValues("val1").Observe(float64(30)) + histVec.WithLabelValues("val2").Observe(float64(20)) + histVec.WithLabelValues("val2").Observe(float64(30)) + histVec.WithLabelValues("val2").Observe(float64(40)) + + reg := prometheus.NewRegistry() + reg.MustRegister(histVec) + + mfs, err := reg.Gather() + if err != nil { + t.Fatalf("error: %v", err) + } + + now := model.Time(1477043083) + var buf bytes.Buffer + err = writeMetrics(&buf, mfs, "prefix", now) + if err != nil { + t.Fatalf("error: %v", err) + } + + want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043 +prefix.name_sum.constname.constvalue.labelname.val1 60 1477043 +prefix.name_count.constname.constvalue.labelname.val1 3 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043 +prefix.name_sum.constname.constvalue.labelname.val2 90 1477043 +prefix.name_count.constname.constvalue.labelname.val2 3 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043 +` + if got := buf.String(); want != got { + t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) + } +} + +func TestToReader(t *testing.T) { + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + 
ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + + reg := prometheus.NewRegistry() + reg.MustRegister(cntVec) + + want := `prefix.name.constname.constvalue.labelname.val1 1 1477043 +prefix.name.constname.constvalue.labelname.val2 1 1477043 +` + mfs, err := reg.Gather() + if err != nil { + t.Fatalf("error: %v", err) + } + + now := model.Time(1477043083) + var buf bytes.Buffer + err = writeMetrics(&buf, mfs, "prefix", now) + if err != nil { + t.Fatalf("error: %v", err) + } + + if got := buf.String(); want != got { + t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) + } +} + +func TestPush(t *testing.T) { + reg := prometheus.NewRegistry() + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + reg.MustRegister(cntVec) + + host := "localhost" + port := ":56789" + b, err := NewBridge(&Config{ + URL: host + port, + Gatherer: reg, + Prefix: "prefix", + }) + if err != nil { + t.Fatalf("error creating bridge: %v", err) + } + + nmg, err := newMockGraphite(port) + if err != nil { + t.Fatalf("error creating mock graphite: %v", err) + } + defer nmg.Close() + + err = b.Push() + if err != nil { + t.Fatalf("error pushing: %v", err) + } + + wants := []string{ + "prefix.name.constname.constvalue.labelname.val1 1", + "prefix.name.constname.constvalue.labelname.val2 1", + } + + select { + case got := <-nmg.readc: + for _, want := range wants { + matched, err := regexp.MatchString(want, got) + if err != nil { + t.Fatalf("error pushing: %v", err) + } + if !matched { + t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got) + } + } + return + case err := <-nmg.errc: + t.Fatalf("error reading push: %v", err) + case <-time.After(50 * time.Millisecond): + t.Fatalf("no result from graphite server") + } +} + +func newMockGraphite(port string) (*mockGraphite, error) { + readc := make(chan string) + errc := make(chan error) + ln, err := net.Listen("tcp", port) + if err != nil { + return nil, err + } + + go func() { + conn, err := ln.Accept() + if err != nil { + errc <- err + } + var b bytes.Buffer + io.Copy(&b, conn) + readc <- b.String() + }() + + return &mockGraphite{ + readc: readc, + errc: errc, + Listener: ln, + }, nil +} + +type mockGraphite struct { + readc chan string + errc chan error + + net.Listener +} + +func ExampleBridge() { + b, err := NewBridge(&Config{ + URL: "graphite.example.org:3099", + Gatherer: prometheus.DefaultGatherer, + Prefix: "prefix", + Interval: 15 * time.Second, + Timeout: 10 * time.Second, + ErrorHandling: AbortOnError, + Logger: log.New(os.Stdout, "graphite bridge: ", log.Lshortfile), + }) + if err != nil { + panic(err) + } + + go func() { + // Start something in a goroutine that uses metrics. + }() + + // Push initial metrics to Graphite. Fail fast if the push fails. + if err := b.Push(); err != nil { + panic(err) + } + + // Create a Context to control stopping the Run() loop that pushes + // metrics to Graphite. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start pushing metrics to Graphite in the Run() loop. 
+ b.Run(ctx) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/histogram.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/histogram.go new file mode 100644 index 00000000..331783a7 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/histogram.go @@ -0,0 +1,505 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. 
The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. 
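To make the bucket helpers and HistogramOpts described above concrete, here is a small usage sketch (illustrative; the metric name and bucket layout are invented, not part of this patch):

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    var reqDur = prometheus.NewHistogram(prometheus.HistogramOpts{
        Name: "example_request_duration_seconds",
        Help: "Request latency in seconds.",
        // Ten buckets from 10ms, doubling up to ~5.1s; the +Inf bucket is implicit.
        Buckets: prometheus.ExponentialBuckets(0.01, 2, 10),
    })

    func init() {
        prometheus.MustRegister(reqDur)
    }

    func timeRequest(handle func()) {
        start := time.Now()
        handle()
        reqDur.Observe(time.Since(start).Seconds())
    }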
+func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + selfCollector + // Note that there is no mutex required. + + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. 
This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *metricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. 
Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +// With works as GetMetricWith but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. 
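The "throw-away metric" use described in the comment above typically lives in a custom collector's Collect method; a hedged sketch with invented names and numbers:

    // apiCollector is a hypothetical collector exposing a latency snapshot
    // that is aggregated elsewhere.
    type apiCollector struct {
        desc *prometheus.Desc // assumed built with NewDesc(name, help, []string{"method"}, nil)
    }

    func (c *apiCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

    func (c *apiCollector) Collect(ch chan<- prometheus.Metric) {
        ch <- prometheus.MustNewConstHistogram(
            c.desc,
            4711,  // observation count
            403.4, // sum of all observations
            map[float64]uint64{0.05: 121, 0.1: 2403, 0.5: 4500}, // cumulative counts per upper bound; +Inf is implied
            "get", // value for the single "method" label
        )
    }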
+func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/histogram_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/histogram_test.go new file mode 100644 index 00000000..5a20f4b6 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/histogram_test.go @@ -0,0 +1,348 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "math/rand" + "reflect" + "sort" + "sync" + "testing" + "testing/quick" + + dto "github.com/prometheus/client_model/go" +) + +func benchmarkHistogramObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramObserve1(b *testing.B) { + benchmarkHistogramObserve(1, b) +} + +func BenchmarkHistogramObserve2(b *testing.B) { + benchmarkHistogramObserve(2, b) +} + +func BenchmarkHistogramObserve4(b *testing.B) { + benchmarkHistogramObserve(4, b) +} + +func BenchmarkHistogramObserve8(b *testing.B) { + benchmarkHistogramObserve(8, b) +} + +func benchmarkHistogramWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramWrite1(b *testing.B) { + benchmarkHistogramWrite(1, b) +} + +func BenchmarkHistogramWrite2(b *testing.B) { + benchmarkHistogramWrite(2, b) +} + +func BenchmarkHistogramWrite4(b *testing.B) { + benchmarkHistogramWrite(4, b) +} + +func BenchmarkHistogramWrite8(b *testing.B) { + benchmarkHistogramWrite(8, b) +} + +func TestHistogramNonMonotonicBuckets(t *testing.T) { + testCases := map[string][]float64{ + "not strictly monotonic": {1, 2, 2, 3}, + "not monotonic at all": {1, 2, 4, 3, 5}, + "have +Inf in the middle": {1, 2, math.Inf(+1), 3}, + } + for name, buckets := range testCases { + func() { + defer func() { + if r := recover(); r == nil { + t.Errorf("Buckets %v are %s but NewHistogram did not panic.", buckets, name) + } + }() + _ = NewHistogram(HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: buckets, + }) + }() + } +} + +// Intentionally adding +Inf here to test if that case is handled correctly. +// Also, getCumulativeCounts depends on it. 
+var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)} + +func TestHistogramConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewHistogram(HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: testBuckets, + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Histogram.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + for i, wantBound := range testBuckets { + if i == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. + } + if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestHistogramVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + his := NewHistogramVec( + HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}, + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + his.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := his.WithLabelValues(string('A' + i)) + s.(Histogram).Write(m) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + if got, 
want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars[i]) + + for j, wantBound := range testBuckets { + if j == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. + } + if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func getCumulativeCounts(vars []float64) []uint64 { + counts := make([]uint64, len(testBuckets)) + for _, v := range vars { + for i := len(testBuckets) - 1; i >= 0; i-- { + if v > testBuckets[i] { + break + } + counts[i]++ + } + } + return counts +} + +func TestBuckets(t *testing.T) { + got := LinearBuckets(-15, 5, 6) + want := []float64{-15, -10, -5, 0, 5, 10} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } + + got = ExponentialBuckets(100, 1.2, 3) + want = []float64{100, 120, 144} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/http.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/http.go new file mode 100644 index 00000000..4d08154b --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/http.go @@ -0,0 +1,512 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" +) + +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. 
You might want to consider using +// promhttp.InstrumentedHandler instead. +func Handler() http.Handler { + return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.Handler instead. See there for further documentation. +func UninstrumentedHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := DefaultGatherer.Gather() + if err != nil { + http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + }) +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +// +// Deprecated: InstrumentHandler has several issues. Use the tooling provided in +// package promhttp instead. The issues are the following: +// +// - It uses Summaries rather than Histograms. Summaries are not useful if +// aggregation across multiple instances is required. +// +// - It uses microseconds as unit, which is deprecated and should be replaced by +// seconds. +// +// - The size of the request is calculated in a separate goroutine. Since this +// calculator requires access to the request header, it creates a race with +// any writes to the header performed during request handling. 
+// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. +// +// - It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. Use the tooling provided in package promhttp instead. 
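//
// A minimal caller-side sketch of the promhttp replacement these deprecation
// notes point to, assuming promhttp.InstrumentHandlerCounter and
// promhttp.InstrumentHandlerDuration exist in the vendored revision; the metric
// names and someHandler are illustrative placeholders, not part of this package:
//
//	reqs := prometheus.NewCounterVec(prometheus.CounterOpts{
//	    Name:        "http_requests_total",
//	    Help:        "Total HTTP requests.",
//	    ConstLabels: prometheus.Labels{"handler": "example"},
//	}, []string{"method", "code"})
//	dur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
//	    Name:        "http_request_duration_seconds",
//	    Help:        "Request latency in seconds.",
//	    ConstLabels: prometheus.Labels{"handler": "example"},
//	}, []string{"method"})
//	prometheus.MustRegister(reqs, dur)
//	wrapped := promhttp.InstrumentHandlerDuration(dur,
//	    promhttp.InstrumentHandlerCounter(reqs, someHandler))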
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + if err := Register(reqCnt); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + panic(err) + } + } + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + if err := Register(reqDur); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + if err := Register(reqSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + if err := Register(resSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + resSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := computeApproximateRequestSize(r) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + reqCnt.WithLabelValues(method, code).Inc() + reqDur.Observe(elapsed) + resSz.Observe(float64(delegate.written)) + reqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request) <-chan int { + // Get URL length in current go routine for avoiding a race condition. + // HandlerFunc that runs in parallel may modify the URL. + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + out := make(chan int, 1) + + go func() { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
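	// Note: r.ContentLength is -1 when the length of the body is unknown (for
	// example a chunked request), in which case the body is simply not counted
	// towards the approximate size.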
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s + close(out) + }() + + return out +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/http_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/http_test.go new file mode 100644 index 00000000..dd89ccf1 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/http_test.go @@ -0,0 +1,164 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +type respBody string + +func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTeapot) + w.Write([]byte(b)) +} + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +func TestInstrumentHandler(t *testing.T) { + defer func(n nower) { + now = n.(nower) + }(now) + + instant := time.Now() + end := instant.Add(30 * time.Second) + now = nowSeries(instant, end) + body := respBody("Howdy there!") + + hndlr := InstrumentHandler("test-handler", body) + + opts := SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": "test-handler"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + } + + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + err := Register(reqCnt) + if err == nil { + t.Fatal("expected reqCnt to be registered already") + } + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + t.Fatal("unexpected registration error:", err) + } + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + err = Register(reqDur) + if err == nil { + t.Fatal("expected reqDur to be registered already") + } + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + t.Fatal("unexpected registration error:", err) + } + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + err = Register(reqSz) + if err == nil { + t.Fatal("expected reqSz to be registered already") + } + if _, ok := err.(AlreadyRegisteredError); !ok { + t.Fatal("unexpected registration error:", err) + } + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + err = Register(resSz) + if err == nil { + t.Fatal("expected resSz to be registered already") + } + if _, ok := err.(AlreadyRegisteredError); !ok { + t.Fatal("unexpected registration error:", err) + } + + reqCnt.Reset() + + resp := httptest.NewRecorder() + req := &http.Request{ + Method: "GET", + } + + hndlr.ServeHTTP(resp, req) + + if resp.Code != http.StatusTeapot { + t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code) + } + if resp.Body.String() != "Howdy there!" 
{ + t.Fatalf("expected body %s, got %s", "Howdy there!", resp.Body.String()) + } + + out := &dto.Metric{} + reqDur.Write(out) + if want, got := "test-handler", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqDur, got %q", want, got) + } + if want, got := uint64(1), out.Summary.GetSampleCount(); want != got { + t.Errorf("want sample count %d in reqDur, got %d", want, got) + } + + out.Reset() + if want, got := 1, len(reqCnt.metricMap.metrics); want != got { + t.Errorf("want %d children in reqCnt, got %d", want, got) + } + cnt, err := reqCnt.GetMetricWithLabelValues("get", "418") + if err != nil { + t.Fatal(err) + } + cnt.Write(out) + if want, got := "418", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "test-handler", out.Label[1].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "get", out.Label[2].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if out.Counter == nil { + t.Fatal("expected non-nil counter in reqCnt") + } + if want, got := 1., out.Counter.GetValue(); want != got { + t.Errorf("want reqCnt of %f, got %f", want, got) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/labels.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/labels.go new file mode 100644 index 00000000..2502e373 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/labels.go @@ -0,0 +1,57 @@ +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. 
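//
// A minimal illustration of the validation helpers defined below, assuming a
// metric vector declared with the label names []string{"method", "code"}; the
// values are made up for the example:
//
//	err := validateLabelValues([]string{"get", "200"}, 2) // nil: count and UTF-8 both fine
//	err = validateLabelValues([]string{"get"}, 2)         // errInconsistentCardinality
//	ok := checkLabelName("method")                        // true
//	ok = checkLabelName("__name__")                       // false: "__" prefix is reserved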
+const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/metric.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/metric.go new file mode 100644 index 00000000..76035bca --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/metric.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. (Implementers may find + // LabelPairSorter useful for that.) Callers of Write should still make + // sure of sorting if they depend on it. + Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". 
+} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. +type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. 
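//
// A minimal sketch of that use case inside a custom Collector's Collect method;
// the diskCollector type, its desc field and the readDiskUsage helper are
// illustrative assumptions, not part of this package:
//
//	func (c *diskCollector) Collect(ch chan<- Metric) {
//	    usedBytes, err := readDiskUsage() // hypothetical helper returning (float64, error)
//	    if err != nil {
//	        // Report the failure to the registry instead of silently dropping the metric.
//	        ch <- NewInvalidMetric(c.desc, err)
//	        return
//	    }
//	    ch <- MustNewConstMetric(c.desc, GaugeValue, usedBytes)
//	}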
+func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/metric_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/metric_test.go new file mode 100644 index 00000000..7145f5e5 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/metric_test.go @@ -0,0 +1,35 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "testing" + +func TestBuildFQName(t *testing.T) { + scenarios := []struct{ namespace, subsystem, name, result string }{ + {"a", "b", "c", "a_b_c"}, + {"", "b", "c", "b_c"}, + {"a", "", "c", "a_c"}, + {"", "", "c", "c"}, + {"a", "b", "", ""}, + {"a", "", "", ""}, + {"", "b", "", ""}, + {" ", "", "", ""}, + } + + for i, s := range scenarios { + if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got { + t.Errorf("%d. want %s, got %s", i, want, got) + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/observer.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/observer.go new file mode 100644 index 00000000..5806cd09 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/observer.go @@ -0,0 +1,52 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. 
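//
// A minimal sketch of the Gauge-as-Observer pattern mentioned above, assuming
// the package's NewTimer helper is available in this revision; the metric name
// is illustrative:
//
//	dur := NewGauge(GaugeOpts{
//	    Name: "last_run_duration_seconds",
//	    Help: "Duration of the most recent run.",
//	})
//	timer := NewTimer(ObserverFunc(dur.Set))
//	defer timer.ObserveDuration()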
+func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/process_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/process_collector.go new file mode 100644 index 00000000..b80adc6e --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/process_collector.go @@ -0,0 +1,139 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, rss *Desc + startTime *Desc +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time for the given process ID under the given namespace. +// +// Currently, the collector depends on a Linux-style proc filesystem and +// therefore only exports metrics for Linux. +func NewProcessCollector(pid int, namespace string) Collector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn works like NewProcessCollector but the process ID is +// determined on each collect anew by calling the given pidFn function. +func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) Collector { + ns := "" + if len(namespace) > 0 { + ns = namespace + "_" + } + + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. 
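//
// A minimal registration sketch for the collector defined above (the "myapp"
// namespace is an illustrative choice); on systems without a Linux-style /proc
// the collector still registers, but collectFn stays a no-op and no values are
// exported:
//
//	MustRegister(NewProcessCollector(os.Getpid(), "myapp"))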
+func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } + + if limits, err := p.NewLimits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/process_collector_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/process_collector_test.go new file mode 100644 index 00000000..d59ad775 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/process_collector_test.go @@ -0,0 +1,60 @@ +// +build linux + +package prometheus + +import ( + "bytes" + "os" + "regexp" + "testing" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/procfs" +) + +func TestProcessCollector(t *testing.T) { + if _, err := procfs.Self(); err != nil { + t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) + } + + registry := NewRegistry() + if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil { + t.Fatal(err) + } + if err := registry.Register(NewProcessCollectorPIDFn( + func() (int, error) { return os.Getpid(), nil }, "foobar"), + ); err != nil { + t.Fatal(err) + } + + mfs, err := registry.Gather() + if err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil { + t.Fatal(err) + } + } + + for _, re := range []*regexp.Regexp{ + regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"), + regexp.MustCompile("\nprocess_max_fds [1-9]"), + regexp.MustCompile("\nprocess_open_fds [1-9]"), + regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"), + regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"), + regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"), + regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"), + regexp.MustCompile("\nfoobar_process_max_fds [1-9]"), + regexp.MustCompile("\nfoobar_process_open_fds [1-9]"), + regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"), + regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"), + } { + if !re.Match(buf.Bytes()) { + t.Errorf("want body to match %s\n%s", re, buf.String()) + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promauto/auto.go 
b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promauto/auto.go new file mode 100644 index 00000000..a00ba1eb --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promauto/auto.go @@ -0,0 +1,223 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promauto provides constructors for the usual Prometheus metrics that +// return them already registered with the global registry +// (prometheus.DefaultRegisterer). This allows very compact code, avoiding any +// references to the registry altogether, but all the constructors in this +// package will panic if the registration fails. +// +// The following example is a complete program to create a histogram of normally +// distributed random numbers from the math/rand package: +// +// package main +// +// import ( +// "math/rand" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// +// func Random() { +// for { +// histogram.Observe(rand.NormFloat64()) +// } +// } +// +// func main() { +// go Random() +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) +// } +// +// Prometheus's version of a minimal hello-world program: +// +// package main +// +// import ( +// "fmt" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// func main() { +// http.Handle("/", promhttp.InstrumentHandlerCounter( +// promauto.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hello_requests_total", +// Help: "Total number of hello-world requests by HTTP code.", +// }, +// []string{"code"}, +// ), +// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// fmt.Fprint(w, "Hello, world!") +// }), +// )) +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) +// } +// +// This appears very handy. So why are these constructors locked away in a +// separate package? There are two caveats: +// +// First, in more complex programs, global state is often quite problematic. +// That's the reason why the metrics constructors in the prometheus package do +// not interact with the global prometheus.DefaultRegisterer on their own. You +// are free to use the Register or MustRegister functions to register them with +// the global prometheus.DefaultRegisterer, but you could as well choose a local +// Registerer (usually created with prometheus.NewRegistry, but there are other +// scenarios, e.g. testing). +// +// The second issue is that registration may fail, e.g. 
if a metric inconsistent +// with the newly to be registered one is already registered. But how to signal +// and handle a panic in the automatic registration with the default registry? +// The only way is panicking. While panicking on invalid input provided by the +// programmer is certainly fine, things are a bit more subtle in this case: You +// might just add another package to the program, and that package (in its init +// function) happens to register a metric with the same name as your code. Now, +// all of a sudden, either your code or the code of the newly imported package +// panics, depending on initialization order, without any opportunity to handle +// the case gracefully. Even worse is a scenario where registration happens +// later during the runtime (e.g. upon loading some kind of plugin), where the +// panic could be triggered long after the code has been deployed to +// production. A possibility to panic should be explicitly called out by the +// Must… idiom, cf. prometheus.MustRegister. But adding a separate set of +// constructors in the prometheus package called MustRegisterNewCounterVec or +// similar would be quite unwieldy. Adding an extra MustRegister method to each +// metric, returning the registered metric, would result in nice code for those +// using the method, but would pollute every single metric interface for +// everybody avoiding the global registry. +// +// To address both issues, the problematic auto-registering and possibly +// panicking constructors are all in this package with a clear warning +// ahead. And whoever cares about avoiding global state and possibly panicking +// function calls can simply ignore the existence of the promauto package +// altogether. +// +// A final note: There is a similar case in the net/http package of the standard +// library. It has DefaultServeMux as a global instance of ServeMux, and the +// Handle function acts on it, panicking if a handler for the same pattern has +// already been registered. However, one might argue that the whole HTTP routing +// is usually set up closely together in the same package or file, while +// Prometheus metrics tend to be spread widely over the codebase, increasing the +// chance of surprising registration failures. Furthermore, the use of global +// state in net/http has been criticized widely, and some avoid it altogether. +package promauto + +import "github.com/prometheus/client_golang/prometheus" + +// NewCounter works like the function of the same name in the prometheus package +// but it automatically registers the Counter with the +// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics. +func NewCounter(opts prometheus.CounterOpts) prometheus.Counter { + c := prometheus.NewCounter(opts) + prometheus.MustRegister(c) + return c +} + +// NewCounterVec works like the function of the same name in the prometheus +// package but it automatically registers the CounterVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec +// panics. +func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { + c := prometheus.NewCounterVec(opts, labelNames) + prometheus.MustRegister(c) + return c +} + +// NewCounterFunc works like the function of the same name in the prometheus +// package but it automatically registers the CounterFunc with the +// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc +// panics. 
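//
// A minimal sketch of the kind of callback NewCounterFunc is meant for; the
// metric name and the atomically updated variable are illustrative assumptions:
//
//	var bytesWritten uint64 // incremented elsewhere via atomic.AddUint64
//
//	promauto.NewCounterFunc(prometheus.CounterOpts{
//	    Name: "myapp_bytes_written_total",
//	    Help: "Total number of bytes written by myapp.",
//	}, func() float64 {
//	    return float64(atomic.LoadUint64(&bytesWritten))
//	})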
+func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { + g := prometheus.NewCounterFunc(opts, function) + prometheus.MustRegister(g) + return g +} + +// NewGauge works like the function of the same name in the prometheus package +// but it automatically registers the Gauge with the +// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics. +func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { + g := prometheus.NewGauge(opts) + prometheus.MustRegister(g) + return g +} + +// NewGaugeVec works like the function of the same name in the prometheus +// package but it automatically registers the GaugeVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics. +func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { + g := prometheus.NewGaugeVec(opts, labelNames) + prometheus.MustRegister(g) + return g +} + +// NewGaugeFunc works like the function of the same name in the prometheus +// package but it automatically registers the GaugeFunc with the +// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics. +func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { + g := prometheus.NewGaugeFunc(opts, function) + prometheus.MustRegister(g) + return g +} + +// NewSummary works like the function of the same name in the prometheus package +// but it automatically registers the Summary with the +// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics. +func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { + s := prometheus.NewSummary(opts) + prometheus.MustRegister(s) + return s +} + +// NewSummaryVec works like the function of the same name in the prometheus +// package but it automatically registers the SummaryVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec +// panics. +func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { + s := prometheus.NewSummaryVec(opts, labelNames) + prometheus.MustRegister(s) + return s +} + +// NewHistogram works like the function of the same name in the prometheus +// package but it automatically registers the Histogram with the +// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics. +func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { + h := prometheus.NewHistogram(opts) + prometheus.MustRegister(h) + return h +} + +// NewHistogramVec works like the function of the same name in the prometheus +// package but it automatically registers the HistogramVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec +// panics. +func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { + h := prometheus.NewHistogramVec(opts, labelNames) + prometheus.MustRegister(h) + return h +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/delegator.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/delegator.go new file mode 100644 index 00000000..9c1c66dc --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/delegator.go @@ -0,0 +1,199 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) + if r.observeWriteHeader != nil { + r.observeWriteHeader(code) + } +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } + +func (d *closeNotifierDelegator) CloseNotify() <-chan bool { + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d *flusherDelegator) Flush() { + d.ResponseWriter.(http.Flusher).Flush() +} +func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
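	// Each optional interface contributes one bit to the index: closeNotifier
	// (1), flusher (2), hijacker (4), readerFrom (8) and pusher (16, only used
	// on Go 1.8+). newDelegator adds up the bits for the interfaces the wrapped
	// ResponseWriter actually implements and uses the sum to pick a wrapper that
	// exposes exactly that combination, e.g. index 6 = hijacker + flusher.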
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return &closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return &flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return &hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, &readerFromDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/delegator_1_8.go 
b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/delegator_1_8.go new file mode 100644 index 00000000..75a905e2 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/delegator_1_8.go @@ -0,0 +1,181 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +type pusherDelegator struct{ *responseWriterDelegator } + +func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +func init() { + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return &pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, &pusherDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, &pusherDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, &pusherDelegator{d}, &readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, 
&closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/delegator_pre_1_8.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/delegator_pre_1_8.go new file mode 100644 index 00000000..8bb9b8b6 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/delegator_pre_1_8.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + + return pickDelegator[id](d) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/http.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/http.go new file mode 100644 index 00000000..01357374 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/http.go @@ -0,0 +1,311 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. +package promhttp + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. 
+// +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. +// +// This function is meant to cover the bulk of basic use cases. If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. +func Handler() http.Handler { + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + var inFlightSem chan struct{} + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + + h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. + defer func() { <-inFlightSem }() + default: + http.Error(w, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } + + mfs, err := reg.Gather() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf, opts.DisableCompression) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding metric family:", err) + } + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + // Handled later. 
+ case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil { + opts.ErrorLog.Println("error while sending encoded metrics:", err) + } + // TODO(beorn7): Consider streaming serving of metrics. + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. 
+ HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. It is recommended to at least + // log errors (by providing an ErrorLog in HandlerOpts) to not mask + // errors completely. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool + // The number of concurrent HTTP requests is limited to + // MaxRequestsInFlight. Additional requests are responded to with 503 + // Service Unavailable and a suitable message in the body. If + // MaxRequestsInFlight is 0 or negative, no limit is applied. + MaxRequestsInFlight int + // If handling a request takes longer than Timeout, it is responded to + // with 503 ServiceUnavailable and a suitable Message. No timeout is + // applied if Timeout is 0 or negative. Note that with the current + // implementation, reaching the timeout simply ends the HTTP requests as + // described above (and even that only if sending of the body hasn't + // started yet), while the bulk work of gathering all the metrics keeps + // running in the background (with the eventual result to be thrown + // away). Until the implementation is improved, it is recommended to + // implement a separate timeout in potentially slow Collectors. + Timeout time.Duration +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). 
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { + if compressionDisabled { + return writer, "" + } + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/http_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/http_test.go new file mode 100644 index 00000000..24d2f8cf --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/http_test.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bytes" + "errors" + "log" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type errorCollector struct{} + +func (e errorCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil) +} + +func (e errorCollector) Collect(ch chan<- prometheus.Metric) { + ch <- prometheus.NewInvalidMetric( + prometheus.NewDesc("invalid_metric", "not helpful", nil, nil), + errors.New("collect error"), + ) +} + +type blockingCollector struct { + CollectStarted, Block chan struct{} +} + +func (b blockingCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- prometheus.NewDesc("dummy_desc", "not helpful", nil, nil) +} + +func (b blockingCollector) Collect(ch chan<- prometheus.Metric) { + select { + case b.CollectStarted <- struct{}{}: + default: + } + // Collects nothing, just waits for a channel receive. + <-b.Block +} + +func TestHandlerErrorHandling(t *testing.T) { + + // Create a registry that collects a MetricFamily with two elements, + // another with one, and reports an error. + reg := prometheus.NewRegistry() + + cnt := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "the_count", + Help: "Ah-ah-ah! 
Thunder and lightning!", + }) + reg.MustRegister(cnt) + + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + reg.MustRegister(cntVec) + + reg.MustRegister(errorCollector{}) + + logBuf := &bytes.Buffer{} + logger := log.New(logBuf, "", 0) + + writer := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + errorHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: HTTPErrorOnError, + }) + continueHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: ContinueOnError, + }) + panicHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: PanicOnError, + }) + wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error +` + wantErrorBody := `An error has occurred during metrics gathering: + +error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error +` + wantOKBody := `# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +# HELP the_count Ah-ah-ah! Thunder and lightning! +# TYPE the_count counter +the_count 0 +` + + errorHandler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusInternalServerError; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got := logBuf.String(); got != wantMsg { + t.Errorf("got log message:\n%s\nwant log message:\n%s\n", got, wantMsg) + } + if got := writer.Body.String(); got != wantErrorBody { + t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody) + } + logBuf.Reset() + writer.Body.Reset() + writer.Code = http.StatusOK + + continueHandler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got := logBuf.String(); got != wantMsg { + t.Errorf("got log message %q, want %q", got, wantMsg) + } + if got := writer.Body.String(); got != wantOKBody { + t.Errorf("got body %q, want %q", got, wantOKBody) + } + + defer func() { + if err := recover(); err == nil { + t.Error("expected panic from panicHandler") + } + }() + panicHandler.ServeHTTP(writer, request) +} + +func TestInstrumentMetricHandler(t *testing.T) { + reg := prometheus.NewRegistry() + handler := InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{})) + // Do it again to test idempotency. 
+ InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{})) + writer := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + handler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + + want := "promhttp_metric_handler_requests_in_flight 1\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } + want = "promhttp_metric_handler_requests_total{code=\"200\"} 0\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } + + writer.Body.Reset() + handler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + + want = "promhttp_metric_handler_requests_in_flight 1\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } + want = "promhttp_metric_handler_requests_total{code=\"200\"} 1\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } +} + +func TestHandlerMaxRequestsInFlight(t *testing.T) { + reg := prometheus.NewRegistry() + handler := HandlerFor(reg, HandlerOpts{MaxRequestsInFlight: 1}) + w1 := httptest.NewRecorder() + w2 := httptest.NewRecorder() + w3 := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)} + reg.MustRegister(c) + + rq1Done := make(chan struct{}) + go func() { + handler.ServeHTTP(w1, request) + close(rq1Done) + }() + <-c.CollectStarted + + handler.ServeHTTP(w2, request) + + if got, want := w2.Code, http.StatusServiceUnavailable; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got, want := w2.Body.String(), "Limit of concurrent requests reached (1), try again later.\n"; got != want { + t.Errorf("got body %q, want %q", got, want) + } + + close(c.Block) + <-rq1Done + + handler.ServeHTTP(w3, request) + + if got, want := w3.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } +} + +func TestHandlerTimeout(t *testing.T) { + reg := prometheus.NewRegistry() + handler := HandlerFor(reg, HandlerOpts{Timeout: time.Millisecond}) + w := httptest.NewRecorder() + + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)} + reg.MustRegister(c) + + handler.ServeHTTP(w, request) + + if got, want := w.Code, http.StatusServiceUnavailable; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got, want := w.Body.String(), "Exceeded configured timeout of 1ms.\n"; got != want { + t.Errorf("got body %q, want %q", got, want) + } + + close(c.Block) // To not leak a goroutine. 
+} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_client.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_client.go new file mode 100644 index 00000000..86fd5644 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_client.go @@ -0,0 +1,97 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// The RoundTripperFunc type is an adapter to allow the use of ordinary +// functions as RoundTrippers. If f is a function with the appropriate +// signature, RountTripperFunc(f) is a RoundTripper that calls f. +type RoundTripperFunc func(req *http.Request) (*http.Response, error) + +// RoundTrip implements the RoundTripper interface. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +// InstrumentRoundTripperInFlight is a middleware that wraps the provided +// http.RoundTripper. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.RoundTripper. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + gauge.Inc() + defer gauge.Dec() + return next.RoundTrip(r) + }) +} + +// InstrumentRoundTripperCounter is a middleware that wraps the provided +// http.RoundTripper to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. Partitioning of the CounterVec happens by HTTP status code +// and/or HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped RoundTripper panics or returns a non-nil error, the Counter +// is not incremented. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(counter) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + resp, err := next.RoundTrip(r) + if err == nil { + counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() + } + return resp, err + }) +} + +// InstrumentRoundTripperDuration is a middleware that wraps the provided +// http.RoundTripper to observe the request duration with the provided +// ObserverVec. The ObserverVec must have zero, one, or two non-const +// non-curried labels. For those, the only allowed label names are "code" and +// "method". 
The function panics otherwise. The Observe method of the Observer +// in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped RoundTripper panics or returns a non-nil error, no values are +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(obs) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + resp, err := next.RoundTrip(r) + if err == nil { + obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) + } + return resp, err + }) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_client_1_8.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_client_1_8.go new file mode 100644 index 00000000..a034d1ec --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_client_1_8.go @@ -0,0 +1,144 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package promhttp + +import ( + "context" + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" +) + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. 
+// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_client_1_8_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_client_1_8_test.go new file mode 100644 index 00000000..7e3f5229 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_client_1_8_test.go @@ -0,0 +1,195 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
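The client-side wrappers compose the same way as the server-side ones; the test and example functions further down chain InFlight, Counter, Trace and Duration together, but a minimal sketch of a single layer (the metric name and target URL are illustrative, not taken from this diff) looks like this:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// "code" and "method" are the only variable label names the wrapper accepts.
	apiRequests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_api_requests_total",
			Help: "A counter for requests from the wrapped client.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(apiRequests)

	client := &http.Client{
		// Count every successful round trip, partitioned by status code and method.
		Transport: promhttp.InstrumentRoundTripperCounter(apiRequests, http.DefaultTransport),
	}
	resp, err := client.Get("http://example.org/")
	if err == nil {
		resp.Body.Close()
	}
}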
+ +// +build go1.8 + +package promhttp + +import ( + "log" + "net/http" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestClientMiddlewareAPI(t *testing.T) { + client := http.DefaultClient + client.Timeout = 1 * time.Second + + reg := prometheus.NewRegistry() + + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "client_in_flight_requests", + Help: "A gauge of in-flight requests for the wrapped client.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "client_api_requests_total", + Help: "A counter for requests from the wrapped client.", + }, + []string{"code", "method"}, + ) + + dnsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "dns_duration_seconds", + Help: "Trace dns latency histogram.", + Buckets: []float64{.005, .01, .025, .05}, + }, + []string{"event"}, + ) + + tlsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "tls_duration_seconds", + Help: "Trace tls latency histogram.", + Buckets: []float64{.05, .1, .25, .5}, + }, + []string{"event"}, + ) + + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "A histogram of request latencies.", + Buckets: prometheus.DefBuckets, + }, + []string{"method"}, + ) + + reg.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge) + + trace := &InstrumentTrace{ + DNSStart: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_start") + }, + DNSDone: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_done") + }, + TLSHandshakeStart: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_start") + }, + TLSHandshakeDone: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_done") + }, + } + + client.Transport = InstrumentRoundTripperInFlight(inFlightGauge, + InstrumentRoundTripperCounter(counter, + InstrumentRoundTripperTrace(trace, + InstrumentRoundTripperDuration(histVec, http.DefaultTransport), + ), + ), + ) + + resp, err := client.Get("http://google.com") + if err != nil { + t.Fatalf("%v", err) + } + defer resp.Body.Close() +} + +func ExampleInstrumentRoundTripperDuration() { + client := http.DefaultClient + client.Timeout = 1 * time.Second + + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "client_in_flight_requests", + Help: "A gauge of in-flight requests for the wrapped client.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "client_api_requests_total", + Help: "A counter for requests from the wrapped client.", + }, + []string{"code", "method"}, + ) + + // dnsLatencyVec uses custom buckets based on expected dns durations. + // It has an instance label "event", which is set in the + // DNSStart and DNSDonehook functions defined in the + // InstrumentTrace struct below. + dnsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "dns_duration_seconds", + Help: "Trace dns latency histogram.", + Buckets: []float64{.005, .01, .025, .05}, + }, + []string{"event"}, + ) + + // tlsLatencyVec uses custom buckets based on expected tls durations. + // It has an instance label "event", which is set in the + // TLSHandshakeStart and TLSHandshakeDone hook functions defined in the + // InstrumentTrace struct below. 
+ tlsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "tls_duration_seconds", + Help: "Trace tls latency histogram.", + Buckets: []float64{.05, .1, .25, .5}, + }, + []string{"event"}, + ) + + // histVec has no labels, making it a zero-dimensional ObserverVec. + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "A histogram of request latencies.", + Buckets: prometheus.DefBuckets, + }, + []string{}, + ) + + // Register all of the metrics in the standard registry. + prometheus.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge) + + // Define functions for the available httptrace.ClientTrace hook + // functions that we want to instrument. + trace := &InstrumentTrace{ + DNSStart: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_start") + }, + DNSDone: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_done") + }, + TLSHandshakeStart: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_start") + }, + TLSHandshakeDone: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_done") + }, + } + + // Wrap the default RoundTripper with middleware. + roundTripper := InstrumentRoundTripperInFlight(inFlightGauge, + InstrumentRoundTripperCounter(counter, + InstrumentRoundTripperTrace(trace, + InstrumentRoundTripperDuration(histVec, http.DefaultTransport), + ), + ), + ) + + // Set the RoundTripper on our client. + client.Transport = roundTripper + + resp, err := client.Get("http://google.com") + if err != nil { + log.Printf("error: %v", err) + } + defer resp.Body.Close() +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_server.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_server.go new file mode 100644 index 00000000..9db24380 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_server.go @@ -0,0 +1,447 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have zero, one, or two non-const non-curried labels. For those, the only +// allowed label names are "code" and "method". The function panics +// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or +// HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. 
The Observe method of +// the Observer in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the response size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. 
+// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. + if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString || isLabelCurried(c, name) { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + } + return +} + +func isLabelCurried(c prometheus.Collector, label string) bool { + // This is even hackier than the label test above. + // We essentially try to curry again and see if it works. + // But for that, we need to type-convert to the two + // types we use here, ObserverVec or *CounterVec. + switch v := c.(type) { + case *prometheus.CounterVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + case prometheus.ObserverVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + default: + panic("unsupported metric vec type") + } + return true +} + +// emptyLabels is a one-time allocation for non-partitioned metrics to avoid +// unnecessary allocations on each request. +var emptyLabels = prometheus.Labels{} + +func labels(code, method bool, reqMethod string, status int) prometheus.Labels { + if !(code || method) { + return emptyLabels + } + labels := prometheus.Labels{} + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, santizeCode will return 200, for consistency with behavior in +// the stdlib. +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_server_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_server_test.go new file mode 100644 index 00000000..716c6f45 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/promhttp/instrument_server_test.go @@ -0,0 +1,401 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
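Dendrite itself only needs promhttp.Handler() on the default registry, but the HandlerOpts defined in http.go above also expose error handling, an in-flight limit and a timeout. A small sketch against a private registry (the listen address, limit and timeout values are illustrative):

package main

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Private registry instead of prometheus.DefaultRegisterer, so the
	// handler only exposes what is registered here.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:            log.New(os.Stderr, "metrics: ", log.LstdFlags),
		ErrorHandling:       promhttp.ContinueOnError,
		MaxRequestsInFlight: 3,               // 503 for concurrent scrapes beyond this
		Timeout:             5 * time.Second, // 503 if gathering takes longer
	})

	// Optionally add the scrape counter and in-flight gauge, as Handler() does.
	http.Handle("/metrics", promhttp.InstrumentMetricHandler(reg, h))
	log.Fatal(http.ListenAndServe(":9100", nil))
}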
+ +package promhttp + +import ( + "io" + "log" + "net/http" + "net/http/httptest" + "testing" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestLabelCheck(t *testing.T) { + scenarios := map[string]struct { + varLabels []string + constLabels []string + curriedLabels []string + ok bool + }{ + "empty": { + varLabels: []string{}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: true, + }, + "code as single var label": { + varLabels: []string{"code"}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: true, + }, + "method as single var label": { + varLabels: []string{"method"}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: true, + }, + "cade and method as var labels": { + varLabels: []string{"method", "code"}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: true, + }, + "valid case with all labels used": { + varLabels: []string{"code", "method"}, + constLabels: []string{"foo", "bar"}, + curriedLabels: []string{"dings", "bums"}, + ok: true, + }, + "unsupported var label": { + varLabels: []string{"foo"}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: false, + }, + "mixed var labels": { + varLabels: []string{"method", "foo", "code"}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: false, + }, + "unsupported var label but curried": { + varLabels: []string{}, + constLabels: []string{}, + curriedLabels: []string{"foo"}, + ok: true, + }, + "mixed var labels but unsupported curried": { + varLabels: []string{"code", "method"}, + constLabels: []string{}, + curriedLabels: []string{"foo"}, + ok: true, + }, + "supported label as const and curry": { + varLabels: []string{}, + constLabels: []string{"code"}, + curriedLabels: []string{"method"}, + ok: true, + }, + "supported label as const and curry with unsupported as var": { + varLabels: []string{"foo"}, + constLabels: []string{"code"}, + curriedLabels: []string{"method"}, + ok: false, + }, + } + + for name, sc := range scenarios { + t.Run(name, func(t *testing.T) { + constLabels := prometheus.Labels{} + for _, l := range sc.constLabels { + constLabels[l] = "dummy" + } + c := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "c", + Help: "c help", + ConstLabels: constLabels, + }, + append(sc.varLabels, sc.curriedLabels...), + ) + o := prometheus.ObserverVec(prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "c", + Help: "c help", + ConstLabels: constLabels, + }, + append(sc.varLabels, sc.curriedLabels...), + )) + for _, l := range sc.curriedLabels { + c = c.MustCurryWith(prometheus.Labels{l: "dummy"}) + o = o.MustCurryWith(prometheus.Labels{l: "dummy"}) + } + + func() { + defer func() { + if err := recover(); err != nil { + if sc.ok { + t.Error("unexpected panic:", err) + } + } else if !sc.ok { + t.Error("expected panic") + } + }() + InstrumentHandlerCounter(c, nil) + }() + func() { + defer func() { + if err := recover(); err != nil { + if sc.ok { + t.Error("unexpected panic:", err) + } + } else if !sc.ok { + t.Error("expected panic") + } + }() + InstrumentHandlerDuration(o, nil) + }() + if sc.ok { + // Test if wantCode and wantMethod were detected correctly. 
+ var wantCode, wantMethod bool + for _, l := range sc.varLabels { + if l == "code" { + wantCode = true + } + if l == "method" { + wantMethod = true + } + } + gotCode, gotMethod := checkLabels(c) + if gotCode != wantCode { + t.Errorf("wanted code=%t for counter, got code=%t", wantCode, gotCode) + } + if gotMethod != wantMethod { + t.Errorf("wanted method=%t for counter, got method=%t", wantMethod, gotMethod) + } + gotCode, gotMethod = checkLabels(o) + if gotCode != wantCode { + t.Errorf("wanted code=%t for observer, got code=%t", wantCode, gotCode) + } + if gotMethod != wantMethod { + t.Errorf("wanted method=%t for observer, got method=%t", wantMethod, gotMethod) + } + } + }) + } +} + +func TestMiddlewareAPI(t *testing.T) { + reg := prometheus.NewRegistry() + + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "in_flight_requests", + Help: "A gauge of requests currently being served by the wrapped handler.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "api_requests_total", + Help: "A counter for requests to the wrapped handler.", + }, + []string{"code", "method"}, + ) + + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "response_duration_seconds", + Help: "A histogram of request latencies.", + Buckets: prometheus.DefBuckets, + ConstLabels: prometheus.Labels{"handler": "api"}, + }, + []string{"method"}, + ) + + writeHeaderVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "write_header_duration_seconds", + Help: "A histogram of time to first write latencies.", + Buckets: prometheus.DefBuckets, + ConstLabels: prometheus.Labels{"handler": "api"}, + }, + []string{}, + ) + + responseSize := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "push_request_size_bytes", + Help: "A histogram of request sizes for requests.", + Buckets: []float64{200, 500, 900, 1500}, + }, + []string{}, + ) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("OK")) + }) + + reg.MustRegister(inFlightGauge, counter, histVec, responseSize, writeHeaderVec) + + chain := InstrumentHandlerInFlight(inFlightGauge, + InstrumentHandlerCounter(counter, + InstrumentHandlerDuration(histVec, + InstrumentHandlerTimeToWriteHeader(writeHeaderVec, + InstrumentHandlerResponseSize(responseSize, handler), + ), + ), + ), + ) + + r, _ := http.NewRequest("GET", "www.example.com", nil) + w := httptest.NewRecorder() + chain.ServeHTTP(w, r) +} + +func TestInstrumentTimeToFirstWrite(t *testing.T) { + var i int + dobs := &responseWriterDelegator{ + ResponseWriter: httptest.NewRecorder(), + observeWriteHeader: func(status int) { + i = status + }, + } + d := newDelegator(dobs, nil) + + d.WriteHeader(http.StatusOK) + + if i != http.StatusOK { + t.Fatalf("failed to execute observeWriteHeader") + } +} + +// testResponseWriter is an http.ResponseWriter that also implements +// http.CloseNotifier, http.Flusher, and io.ReaderFrom. 
+type testResponseWriter struct { + closeNotifyCalled, flushCalled, readFromCalled bool +} + +func (t *testResponseWriter) Header() http.Header { return nil } +func (t *testResponseWriter) Write([]byte) (int, error) { return 0, nil } +func (t *testResponseWriter) WriteHeader(int) {} +func (t *testResponseWriter) CloseNotify() <-chan bool { + t.closeNotifyCalled = true + return nil +} +func (t *testResponseWriter) Flush() { t.flushCalled = true } +func (t *testResponseWriter) ReadFrom(io.Reader) (int64, error) { + t.readFromCalled = true + return 0, nil +} + +// testFlusher is an http.ResponseWriter that also implements http.Flusher. +type testFlusher struct { + flushCalled bool +} + +func (t *testFlusher) Header() http.Header { return nil } +func (t *testFlusher) Write([]byte) (int, error) { return 0, nil } +func (t *testFlusher) WriteHeader(int) {} +func (t *testFlusher) Flush() { t.flushCalled = true } + +func TestInterfaceUpgrade(t *testing.T) { + w := &testResponseWriter{} + d := newDelegator(w, nil) + d.(http.CloseNotifier).CloseNotify() + if !w.closeNotifyCalled { + t.Error("CloseNotify not called") + } + d.(http.Flusher).Flush() + if !w.flushCalled { + t.Error("Flush not called") + } + d.(io.ReaderFrom).ReadFrom(nil) + if !w.readFromCalled { + t.Error("ReadFrom not called") + } + if _, ok := d.(http.Hijacker); ok { + t.Error("delegator unexpectedly implements http.Hijacker") + } + + f := &testFlusher{} + d = newDelegator(f, nil) + if _, ok := d.(http.CloseNotifier); ok { + t.Error("delegator unexpectedly implements http.CloseNotifier") + } + d.(http.Flusher).Flush() + if !w.flushCalled { + t.Error("Flush not called") + } + if _, ok := d.(io.ReaderFrom); ok { + t.Error("delegator unexpectedly implements io.ReaderFrom") + } + if _, ok := d.(http.Hijacker); ok { + t.Error("delegator unexpectedly implements http.Hijacker") + } +} + +func ExampleInstrumentHandlerDuration() { + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "in_flight_requests", + Help: "A gauge of requests currently being served by the wrapped handler.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "api_requests_total", + Help: "A counter for requests to the wrapped handler.", + }, + []string{"code", "method"}, + ) + + // duration is partitioned by the HTTP method and handler. It uses custom + // buckets based on the expected request duration. + duration := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "A histogram of latencies for requests.", + Buckets: []float64{.25, .5, 1, 2.5, 5, 10}, + }, + []string{"handler", "method"}, + ) + + // responseSize has no labels, making it a zero-dimensional + // ObserverVec. + responseSize := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "response_size_bytes", + Help: "A histogram of response sizes for requests.", + Buckets: []float64{200, 500, 900, 1500}, + }, + []string{}, + ) + + // Create the handlers that will be wrapped by the middleware. + pushHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Push")) + }) + pullHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Pull")) + }) + + // Register all of the metrics in the standard registry. + prometheus.MustRegister(inFlightGauge, counter, duration, responseSize) + + // Instrument the handlers with all the metrics, injecting the "handler" + // label by currying. 
+ pushChain := InstrumentHandlerInFlight(inFlightGauge, + InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "push"}), + InstrumentHandlerCounter(counter, + InstrumentHandlerResponseSize(responseSize, pushHandler), + ), + ), + ) + pullChain := InstrumentHandlerInFlight(inFlightGauge, + InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "pull"}), + InstrumentHandlerCounter(counter, + InstrumentHandlerResponseSize(responseSize, pullHandler), + ), + ), + ) + + http.Handle("/metrics", Handler()) + http.Handle("/push", pushChain) + http.Handle("/pull", pullChain) + + if err := http.ListenAndServe(":3000", nil); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/deprecated.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/deprecated.go new file mode 100644 index 00000000..3d62b572 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/deprecated.go @@ -0,0 +1,172 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package push + +// This file contains only deprecated code. Remove after v0.9 is released. + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "github.com/prometheus/client_golang/prometheus" +) + +// FromGatherer triggers a metric collection by the provided Gatherer (which is +// usually implemented by a prometheus.Registry) and pushes all gathered metrics +// to the Pushgateway specified by url, using the provided job name and the +// (optional) further grouping labels (the grouping map may be nil). See the +// Pushgateway documentation for detailed implications of the job and other +// grouping labels. Neither the job name nor any grouping label value may +// contain a "/". The metrics pushed must not contain a job label of their own +// nor any of the grouping labels. +// +// You can use just host:port or ip:port as url, in which case 'http://' is +// added automatically. You can also include the schema in the URL. However, do +// not include the '/metrics/jobs/...' part. +// +// Note that all previously pushed metrics with the same job and other grouping +// labels will be replaced with the metrics pushed by this call. (It uses HTTP +// method 'PUT' to push to the Pushgateway.) +// +// Deprecated: Please use a Pusher created with New instead. +func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { + return push(job, grouping, url, g, "PUT") +} + +// AddFromGatherer works like FromGatherer, but only previously pushed metrics +// with the same name (and the same job and other grouping labels) will be +// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) +// +// Deprecated: Please use a Pusher created with New instead. 
+func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { + return push(job, grouping, url, g, "POST") +} + +func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error { + if !strings.Contains(pushURL, "://") { + pushURL = "http://" + pushURL + } + if strings.HasSuffix(pushURL, "/") { + pushURL = pushURL[:len(pushURL)-1] + } + + if strings.Contains(job, "/") { + return fmt.Errorf("job contains '/': %s", job) + } + urlComponents := []string{url.QueryEscape(job)} + for ln, lv := range grouping { + if !model.LabelName(ln).IsValid() { + return fmt.Errorf("grouping label has invalid name: %s", ln) + } + if strings.Contains(lv, "/") { + return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv) + } + urlComponents = append(urlComponents, ln, lv) + } + pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/")) + + mfs, err := g.Gather() + if err != nil { + return err + } + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + // Check for pre-existing grouping labels: + for _, mf := range mfs { + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if l.GetName() == "job" { + return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) + } + if _, ok := grouping[l.GetName()]; ok { + return fmt.Errorf( + "pushed metric %s (%s) already contains grouping label %s", + mf.GetName(), m, l.GetName(), + ) + } + } + } + enc.Encode(mf) + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. + return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body) + } + return nil +} + +// Collectors works like FromGatherer, but it does not use a Gatherer. Instead, +// it collects from the provided collectors directly. It is a convenient way to +// push only a few metrics. +// +// Deprecated: Please use a Pusher created with New instead. +func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { + return pushCollectors(job, grouping, url, "PUT", collectors...) +} + +// AddCollectors works like AddFromGatherer, but it does not use a Gatherer. +// Instead, it collects from the provided collectors directly. It is a +// convenient way to push only a few metrics. +// +// Deprecated: Please use a Pusher created with New instead. +func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { + return pushCollectors(job, grouping, url, "POST", collectors...) +} + +func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error { + r := prometheus.NewRegistry() + for _, collector := range collectors { + if err := r.Register(collector); err != nil { + return err + } + } + return push(job, grouping, url, r, method) +} + +// HostnameGroupingKey returns a label map with the only entry +// {instance=""}. This can be conveniently used as the grouping +// parameter if metrics should be pushed with the hostname as label. 
The +// returned map is created upon each call so that the caller is free to add more +// labels to the map. +// +// Deprecated: Usually, metrics pushed to the Pushgateway should not be +// host-centric. (You would use https://github.com/prometheus/node_exporter in +// that case.) If you have the need to add the hostname to the grouping key, you +// are probably doing something wrong. See +// https://prometheus.io/docs/practices/pushing/ for details. +func HostnameGroupingKey() map[string]string { + hostname, err := os.Hostname() + if err != nil { + return map[string]string{"instance": "unknown"} + } + return map[string]string{"instance": hostname} +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/example_add_from_gatherer_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/example_add_from_gatherer_test.go new file mode 100644 index 00000000..dd22b526 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/example_add_from_gatherer_test.go @@ -0,0 +1,80 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package push_test + +import ( + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/push" +) + +var ( + completionTime = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_completion_timestamp_seconds", + Help: "The timestamp of the last completion of a DB backup, successful or not.", + }) + successTime = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_success_timestamp_seconds", + Help: "The timestamp of the last successful completion of a DB backup.", + }) + duration = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_duration_seconds", + Help: "The duration of the last DB backup in seconds.", + }) + records = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_records_processed", + Help: "The number of records processed in the last DB backup.", + }) +) + +func performBackup() (int, error) { + // Perform the backup and return the number of backed up records and any + // applicable error. + // ... + return 42, nil +} + +func ExamplePusher_Add() { + // We use a registry here to benefit from the consistency checks that + // happen during registration. + registry := prometheus.NewRegistry() + registry.MustRegister(completionTime, duration, records) + // Note that successTime is not registered. + + pusher := push.New("http://pushgateway:9091", "db_backup").Gatherer(registry) + + start := time.Now() + n, err := performBackup() + records.Set(float64(n)) + // Note that time.Since only uses a monotonic clock in Go1.9+. + duration.Set(time.Since(start).Seconds()) + completionTime.SetToCurrentTime() + if err != nil { + fmt.Println("DB backup failed:", err) + } else { + // Add successTime to pusher only in case of success. + // We could as well register it with the registry. 
+ // This example, however, demonstrates that you can + // mix Gatherers and Collectors when handling a Pusher. + pusher.Collector(successTime) + successTime.SetToCurrentTime() + } + // Add is used here rather than Push to not delete a previously pushed + // success timestamp in case of a failure of this backup. + if err := pusher.Add(); err != nil { + fmt.Println("Could not push to Pushgateway:", err) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/examples_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/examples_test.go new file mode 100644 index 00000000..fa5549a9 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/examples_test.go @@ -0,0 +1,35 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package push_test + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/push" +) + +func ExamplePusher_Push() { + completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_completion_timestamp_seconds", + Help: "The timestamp of the last successful completion of a DB backup.", + }) + completionTime.SetToCurrentTime() + if err := push.New("http://pushgateway:9091", "db_backup"). + Collector(completionTime). + Grouping("db", "customers"). + Push(); err != nil { + fmt.Println("Could not push completion time to Pushgateway:", err) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/push.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/push.go new file mode 100644 index 00000000..3721ff19 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/push.go @@ -0,0 +1,236 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package push provides functions to push metrics to a Pushgateway. It uses a +// builder approach. Create a Pusher with New and then add the various options +// by using its methods, finally calling Add or Push, like this: +// +// // Easy case: +// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() +// +// // Complex case: +// push.New("http://example.org/metrics", "my_job"). +// Collector(myCollector1). +// Collector(myCollector2). +// Grouping("zone", "xy"). +// Client(&myHTTPClient). +// BasicAuth("top", "secret"). 
+// Add() +// +// See the examples section for more detailed examples. +// +// See the documentation of the Pushgateway to understand the meaning of +// the grouping key and the differences between Push and Add: +// https://github.com/prometheus/pushgateway +package push + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "github.com/prometheus/client_golang/prometheus" +) + +const contentTypeHeader = "Content-Type" + +// Pusher manages a push to the Pushgateway. Use New to create one, configure it +// with its methods, and finally use the Add or Push method to push. +type Pusher struct { + error error + + url, job string + grouping map[string]string + + gatherers prometheus.Gatherers + registerer prometheus.Registerer + + client *http.Client + useBasicAuth bool + username, password string +} + +// New creates a new Pusher to push to the provided URL with the provided job +// name. You can use just host:port or ip:port as url, in which case “http://” +// is added automatically. Alternatively, include the schema in the +// URL. However, do not include the “/metrics/jobs/…” part. +// +// Note that until https://github.com/prometheus/pushgateway/issues/97 is +// resolved, a “/” character in the job name is prohibited. +func New(url, job string) *Pusher { + var ( + reg = prometheus.NewRegistry() + err error + ) + if !strings.Contains(url, "://") { + url = "http://" + url + } + if strings.HasSuffix(url, "/") { + url = url[:len(url)-1] + } + if strings.Contains(job, "/") { + err = fmt.Errorf("job contains '/': %s", job) + } + + return &Pusher{ + error: err, + url: url, + job: job, + grouping: map[string]string{}, + gatherers: prometheus.Gatherers{reg}, + registerer: reg, + client: &http.Client{}, + } +} + +// Push collects/gathers all metrics from all Collectors and Gatherers added to +// this Pusher. Then, it pushes them to the Pushgateway configured while +// creating this Pusher, using the configured job name and any added grouping +// labels as grouping key. All previously pushed metrics with the same job and +// other grouping labels will be replaced with the metrics pushed by this +// call. (It uses HTTP method “PUT” to push to the Pushgateway.) +// +// Push returns the first error encountered by any method call (including this +// one) in the lifetime of the Pusher. +func (p *Pusher) Push() error { + return p.push("PUT") +} + +// Add works like push, but only previously pushed metrics with the same name +// (and the same job and other grouping labels) will be replaced. (It uses HTTP +// method “POST” to push to the Pushgateway.) +func (p *Pusher) Add() error { + return p.push("POST") +} + +// Gatherer adds a Gatherer to the Pusher, from which metrics will be gathered +// to push them to the Pushgateway. The gathered metrics must not contain a job +// label of their own. +// +// For convenience, this method returns a pointer to the Pusher itself. +func (p *Pusher) Gatherer(g prometheus.Gatherer) *Pusher { + p.gatherers = append(p.gatherers, g) + return p +} + +// Collector adds a Collector to the Pusher, from which metrics will be +// collected to push them to the Pushgateway. The collected metrics must not +// contain a job label of their own. +// +// For convenience, this method returns a pointer to the Pusher itself. 
+func (p *Pusher) Collector(c prometheus.Collector) *Pusher { + if p.error == nil { + p.error = p.registerer.Register(c) + } + return p +} + +// Grouping adds a label pair to the grouping key of the Pusher, replacing any +// previously added label pair with the same label name. Note that setting any +// labels in the grouping key that are already contained in the metrics to push +// will lead to an error. +// +// For convenience, this method returns a pointer to the Pusher itself. +// +// Note that until https://github.com/prometheus/pushgateway/issues/97 is +// resolved, this method does not allow a “/” character in the label value. +func (p *Pusher) Grouping(name, value string) *Pusher { + if p.error == nil { + if !model.LabelName(name).IsValid() { + p.error = fmt.Errorf("grouping label has invalid name: %s", name) + return p + } + if strings.Contains(value, "/") { + p.error = fmt.Errorf("value of grouping label %s contains '/': %s", name, value) + return p + } + p.grouping[name] = value + } + return p +} + +// Client sets a custom HTTP client for the Pusher. For convenience, this method +// returns a pointer to the Pusher itself. +func (p *Pusher) Client(c *http.Client) *Pusher { + p.client = c + return p +} + +// BasicAuth configures the Pusher to use HTTP Basic Authentication with the +// provided username and password. For convenience, this method returns a +// pointer to the Pusher itself. +func (p *Pusher) BasicAuth(username, password string) *Pusher { + p.useBasicAuth = true + p.username = username + p.password = password + return p +} + +func (p *Pusher) push(method string) error { + if p.error != nil { + return p.error + } + urlComponents := []string{url.QueryEscape(p.job)} + for ln, lv := range p.grouping { + urlComponents = append(urlComponents, ln, lv) + } + pushURL := fmt.Sprintf("%s/metrics/job/%s", p.url, strings.Join(urlComponents, "/")) + + mfs, err := p.gatherers.Gather() + if err != nil { + return err + } + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + // Check for pre-existing grouping labels: + for _, mf := range mfs { + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if l.GetName() == "job" { + return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) + } + if _, ok := p.grouping[l.GetName()]; ok { + return fmt.Errorf( + "pushed metric %s (%s) already contains grouping label %s", + mf.GetName(), m, l.GetName(), + ) + } + } + } + enc.Encode(mf) + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + if p.useBasicAuth { + req.SetBasicAuth(p.username, p.password) + } + req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) + resp, err := p.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. 
+ return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body) + } + return nil +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/push_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/push_test.go new file mode 100644 index 00000000..34ec334b --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/push/push_test.go @@ -0,0 +1,194 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package push + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestPush(t *testing.T) { + + var ( + lastMethod string + lastBody []byte + lastPath string + ) + + // Fake a Pushgateway that always responds with 202. + pgwOK := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + lastMethod = r.Method + var err error + lastBody, err = ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + lastPath = r.URL.EscapedPath() + w.Header().Set("Content-Type", `text/plain; charset=utf-8`) + w.WriteHeader(http.StatusAccepted) + }), + ) + defer pgwOK.Close() + + // Fake a Pushgateway that always responds with 500. + pgwErr := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "fake error", http.StatusInternalServerError) + }), + ) + defer pgwErr.Close() + + metric1 := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "testname1", + Help: "testhelp1", + }) + metric2 := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testname2", + Help: "testhelp2", + ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"}, + }) + + reg := prometheus.NewRegistry() + reg.MustRegister(metric1) + reg.MustRegister(metric2) + + mfs, err := reg.Gather() + if err != nil { + t.Fatal(err) + } + + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + t.Fatal(err) + } + } + wantBody := buf.Bytes() + + // Push some Collectors, all good. + if err := New(pgwOK.URL, "testjob"). + Collector(metric1). + Collector(metric2). + Push(); err != nil { + t.Fatal(err) + } + if lastMethod != "PUT" { + t.Error("want method PUT for Push, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob" { + t.Error("unexpected path:", lastPath) + } + + // Add some Collectors, with nil grouping, all good. + if err := New(pgwOK.URL, "testjob"). + Collector(metric1). + Collector(metric2). 
+ Add(); err != nil { + t.Fatal(err) + } + if lastMethod != "POST" { + t.Error("want method POST for Add, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob" { + t.Error("unexpected path:", lastPath) + } + + // Push some Collectors with a broken PGW. + if err := New(pgwErr.URL, "testjob"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { + t.Error("push to broken Pushgateway succeeded") + } else { + if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want { + t.Errorf("got error %q, want %q", got, want) + } + } + + // Push some Collectors with invalid grouping or job. + if err := New(pgwOK.URL, "testjob"). + Grouping("foo", "bums"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { + t.Error("push with grouping contained in metrics succeeded") + } + if err := New(pgwOK.URL, "test/job"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { + t.Error("push with invalid job value succeeded") + } + if err := New(pgwOK.URL, "testjob"). + Grouping("foobar", "bu/ms"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { + t.Error("push with invalid grouping succeeded") + } + if err := New(pgwOK.URL, "testjob"). + Grouping("foo-bar", "bums"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { + t.Error("push with invalid grouping succeeded") + } + + // Push registry, all good. + if err := New(pgwOK.URL, "testjob"). + Gatherer(reg). + Push(); err != nil { + t.Fatal(err) + } + if lastMethod != "PUT" { + t.Error("want method PUT for Push, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + + // Add registry, all good. + if err := New(pgwOK.URL, "testjob"). + Grouping("a", "x"). + Grouping("b", "y"). + Gatherer(reg). + Add(); err != nil { + t.Fatal(err) + } + if lastMethod != "POST" { + t.Error("want method POST for Add, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" { + t.Error("unexpected path:", lastPath) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/registry.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/registry.go new file mode 100644 index 00000000..1f9adb80 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/registry.go @@ -0,0 +1,808 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "bytes" + "errors" + "fmt" + "os" + "runtime" + "sort" + "sync" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { + return &Registry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + } +} + +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. +// +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. +func NewPedanticRegistry() *Registry { + r := NewRegistry() + r.pedanticChecksEnabled = true + return r +} + +// Registerer is the interface for the part of a registry in charge of +// registering and unregistering. Users of custom registries should use +// Registerer as type for registration purposes (rather than the Registry type +// directly). In that way, they are free to use custom Registerer implementation +// (e.g. for testing purposes). +type Registerer interface { + // Register registers a new Collector to be included in metrics + // collection. It returns an error if the descriptors provided by the + // Collector are invalid or if they — in combination with descriptors of + // already registered Collectors — do not fulfill the consistency and + // uniqueness criteria described in the documentation of metric.Desc. + // + // If the provided Collector is equal to a Collector already registered + // (which includes the case of re-registering the same Collector), the + // returned error is an instance of AlreadyRegisteredError, which + // contains the previously registered Collector. + // + // It is in general not safe to register the same Collector multiple + // times concurrently. 
+ Register(Collector) error + // MustRegister works like Register but registers any number of + // Collectors and panics upon the first registration that causes an + // error. + MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) The function + // returns whether a Collector was unregistered. + // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of MetricFamily protobufs. Even if an error occurs, Gather attempts + // to gather as many metrics as possible. Hence, if a non-nil error is + // returned, the returned MetricFamily slice could be nil (in case of a + // fatal error that prevented any meaningful metric collection) or + // contain a number of MetricFamily protobufs, some of which might be + // incomplete, and some might be missing altogether. The returned error + // (which might be a MultiError) explains the details. In scenarios + // where complete collection is critical, the returned MetricFamily + // protobufs should be disregarded if the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. 
This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+ ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+ return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+ if len(errs) == 0 {
+ return ""
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+ for _, err := range errs {
+ fmt.Fprintf(buf, "\n* %s", err)
+ }
+ return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+ if err != nil {
+ *errs = append(*errs, err)
+ }
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errs
+ }
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ newDescIDs = map[uint64]struct{}{}
+ newDimHashesByName = map[string]uint64{}
+ collectorID uint64 // Just a sum of all desc IDs.
+ duplicateDescErr error
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen. 
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? + if len(newDescIDs) == 0 { + return errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return AlreadyRegisteredError{ + ExistingCollector: existing, + NewCollector: c, + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + metricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + collectors := make(chan Collector, len(r.collectorsByID)) + for _, collector := range r.collectorsByID { + collectors <- collector + } + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. + if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + r.mtx.RUnlock() + + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-collectors: + collector.Collect(metricChan) + wg.Done() + default: + return + } + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close the metricChan once all collectors are collected. + go func() { + wg.Wait() + close(metricChan) + }() + + // Drain metricChan in case of premature return. 
+ defer func() { + for range metricChan { + } + }() + +collectLoop: + for { + select { + case metric, ok := <-metricChan: + if !ok { + // metricChan is closed, we are done. + break collectLoop + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, dimHashes, + registeredDescIDs, + )) + default: + if goroutineBudget <= 0 || len(collectors) == 0 { + // All collectors are already being worked on or + // we have already as many goroutines started as + // there are collectors. Just process metrics + // from now on. + for metric := range metricChan { + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, dimHashes, + registeredDescIDs, + )) + } + break collectLoop + } + // Start more workers. + go collectWorker() + goroutineBudget-- + runtime.Gosched() + } + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + dimHashes map[string]uint64, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? 
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ return err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ return nil
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricFamiliesByName = map[string]*dto.MetricFamily{}
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ errs MultiError // The collected errors to return in the end.
+ )
+
+ for i, g := range gs {
+ mfs, err := g.Gather()
+ if err != nil {
+ if multiErr, ok := err.(MultiError); ok {
+ for _, err := range multiErr {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ }
+ for _, mf := range mfs {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if exists {
+ if existingMF.GetHelp() != mf.GetHelp() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has help %q but should have %q",
+ mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+ ))
+ continue
+ }
+ if existingMF.GetType() != mf.GetType() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has type %s but should have %s",
+ mf.GetName(), mf.GetType(), existingMF.GetType(),
+ ))
+ continue
+ }
+ } else {
+ existingMF = &dto.MetricFamily{}
+ existingMF.Name = mf.Name
+ existingMF.Help = mf.Help
+ existingMF.Type = mf.Type
+ metricFamiliesByName[mf.GetName()] = existingMF
+ }
+ for _, m := range mf.Metric {
+ if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. 
However, we have to deal with the fact, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// doesn't yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal to
+// the calculated dimHash.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+ dimHashes map[string]uint64,
+) error {
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ for _, labelPair := range dtoMetric.GetLabel() {
+ if !utf8.ValidString(*labelPair.Value) {
+ return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value)
+ }
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := hashNew()
+ h = hashAdd(h, metricFamily.GetName())
+ h = hashAddByte(h, separatorByte)
+ dh := hashNew()
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check. 
+ sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + dh = hashAdd(dh, lp.GetName()) + dh = hashAddByte(dh, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { + if dimHash != dh { + return fmt.Errorf( + "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", + metricFamily.GetName(), dtoMetric, + ) + } + } else { + dimHashes[metricFamily.GetName()] = dh + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/registry_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/registry_test.go new file mode 100644 index 00000000..653df4f7 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/registry_test.go @@ -0,0 +1,590 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package prometheus_test + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +func testHandler(t testing.TB) { + + metricVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + + metricVec.WithLabelValues("val1").Inc() + metricVec.WithLabelValues("val2").Inc() + + externalMetricFamily := &dto.MetricFamily{ + Name: proto.String("externalname"), + Help: proto.String("externaldocstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("externalconstname"), + Value: proto.String("externalconstvalue"), + }, + { + Name: proto.String("externallabelname"), + Value: proto.String("externalval1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + externalBuf := &bytes.Buffer{} + enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim) + if err := enc.Encode(externalMetricFamily); err != nil { + t.Fatal(err) + } + externalMetricFamilyAsBytes := externalBuf.Bytes() + externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring +# TYPE externalname counter +externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1 +`) + externalMetricFamilyAsProtoText := []byte(`name: "externalname" +help: "externaldocstring" +type: COUNTER +metric: < + label: < + name: "externalconstname" + value: "externalconstvalue" + > + label: < + name: "externallabelname" + value: "externalval1" + > + counter: < + value: 1 + > +> + +`) + externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric: label: counter: > +`) + + expectedMetricFamily := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + buf := &bytes.Buffer{} + enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + if err := enc.Encode(expectedMetricFamily); err != nil { + t.Fatal(err) + } + expectedMetricFamilyAsBytes := buf.Bytes() + expectedMetricFamilyAsText := []byte(`# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +`) + expectedMetricFamilyAsProtoText := []byte(`name: "name" +help: "docstring" +type: COUNTER +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val1" + > + counter: < + value: 1 + > +> +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val2" + > + counter: < + value: 1 + > +> + +`) + expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" 
help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > +`) + + externalMetricFamilyWithSameName := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("different_val"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(42), + }, + }, + }, + } + + expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > metric: label: counter: > +`) + + externalMetricFamilyWithInvalidLabelValue := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("\xFF"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("different_val"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(42), + }, + }, + }, + } + + expectedMetricFamilyInvalidLabelValueAsText := []byte(`An error has occurred during metrics gathering: + +collected metric's label constname is not utf8: "\xff" +`) + + type output struct { + headers map[string]string + body []byte + } + + var scenarios = []struct { + headers map[string]string + out output + collector prometheus.Collector + externalMF []*dto.MetricFamily + }{ + { // 0 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, dings/bums;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: []byte{}, + }, + }, + { // 1 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/quark;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: []byte{}, + }, + }, + { // 2 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: []byte{}, + }, + }, + { // 3 + headers: map[string]string{ + "Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: []byte{}, + }, + }, + { // 4 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 5 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: expectedMetricFamilyAsBytes, + }, + collector: metricVec, + }, + { // 6 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; 
charset=utf-8`, + }, + body: externalMetricFamilyAsText, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 7 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: externalMetricFamilyAsBytes, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 8 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 9 + headers: map[string]string{ + "Accept": "text/plain", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: []byte{}, + }, + }, + { // 10 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 11 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsText, + expectedMetricFamilyAsText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 12 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 13 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoText, + expectedMetricFamilyAsProtoText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 14 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": 
`application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 15 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyMergedWithExternalAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{ + externalMetricFamily, + externalMetricFamilyWithSameName, + }, + }, + { // 16 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; charset=utf-8`, + }, + body: expectedMetricFamilyInvalidLabelValueAsText, + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{ + externalMetricFamily, + externalMetricFamilyWithInvalidLabelValue, + }, + }, + } + for i, scenario := range scenarios { + registry := prometheus.NewPedanticRegistry() + gatherer := prometheus.Gatherer(registry) + if scenario.externalMF != nil { + gatherer = prometheus.Gatherers{ + registry, + prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { + return scenario.externalMF, nil + }), + } + } + + if scenario.collector != nil { + registry.Register(scenario.collector) + } + writer := httptest.NewRecorder() + handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})) + request, _ := http.NewRequest("GET", "/", nil) + for key, value := range scenario.headers { + request.Header.Add(key, value) + } + handler(writer, request) + + for key, value := range scenario.out.headers { + if writer.HeaderMap.Get(key) != value { + t.Errorf( + "%d. expected %q for header %q, got %q", + i, value, key, writer.Header().Get(key), + ) + } + } + + if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) { + t.Errorf( + "%d. expected body:\n%s\ngot body:\n%s\n", + i, scenario.out.body, writer.Body.Bytes(), + ) + } + } +} + +func TestHandler(t *testing.T) { + testHandler(t) +} + +func BenchmarkHandler(b *testing.B) { + for i := 0; i < b.N; i++ { + testHandler(b) + } +} + +func TestRegisterWithOrGet(t *testing.T) { + // Replace the default registerer just to be sure. This is bad, but this + // whole test will go away once RegisterOrGet is removed. 
+ oldRegisterer := prometheus.DefaultRegisterer + defer func() { + prometheus.DefaultRegisterer = oldRegisterer + }() + prometheus.DefaultRegisterer = prometheus.NewRegistry() + original := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "test", + Help: "help", + }, + []string{"foo", "bar"}, + ) + equalButNotSame := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "test", + Help: "help", + }, + []string{"foo", "bar"}, + ) + var err error + if err = prometheus.Register(original); err != nil { + t.Fatal(err) + } + if err = prometheus.Register(equalButNotSame); err == nil { + t.Fatal("expected error when registringe equal collector") + } + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + if are.ExistingCollector != original { + t.Error("expected original collector but got something else") + } + if are.ExistingCollector == equalButNotSame { + t.Error("expected original callector but got new one") + } + } else { + t.Error("unexpected error:", err) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/summary.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/summary.go new file mode 100644 index 00000000..f7dc85b9 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/summary.go @@ -0,0 +1,609 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. However, the default behavior will change in the +// upcoming v0.10 of the library. There will be no rank estiamtions at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +// DefObjectives are the default Summary quantile values. 
+// +// Deprecated: DefObjectives will not be used as the default objectives in +// v0.10 of the library. The default Summary will have no quantiles then. +var ( + DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + + errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, + ) +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name and Help to a non-empty string. While all other fields +// are optional and can safely be left at their zero value, it is recommended to +// explicitly set the Objectives field to the desired value as the default value +// will change in the upcoming v0.10 of the library. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is DefObjectives. It is used if Objectives is left at + // its zero value (i.e. nil). To create a Summary without Objectives, + // set it to an empty map (i.e. map[float64]float64{}). + // + // Deprecated: Note that the current value of DefObjectives is + // deprecated. It will be replaced by an empty map in v0.10 of the + // library. Please explicitly set Objectives to the desired value. + Objectives map[float64]float64 + + // MaxAge defines the duration for which an observation stays relevant + // for the summary. Must be positive. The default value is DefMaxAge. + MaxAge time.Duration + + // AgeBuckets is the number of buckets used to exclude observations that + // are older than MaxAge from the summary. A higher number has a + // resource penalty, so only increase it if the higher resolution is + // really required. For very high observation rates, you might want to + // reduce the number of age buckets. 
With only one age bucket, you will + // effectively see a complete reset of the summary each time MaxAge has + // passed. The default value is DefAgeBuckets. + AgeBuckets uint32 + + // BufCap defines the default sample stream buffer size. The default + // value of DefBufCap should suffice for most uses. If there is a need + // to increase the value, a multiple of 500 is recommended (because that + // is the internal buffer size of the underlying package + // "github.com/bmizerany/perks/quantile"). + BufCap uint32 +} + +// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// perk/quantile is actually not working as advertised - and it might be +// unfixable, as the underlying algorithm is apparently not capable of merging +// summaries in the first place. To avoid using Merge, we are currently adding +// observations to _each_ age bucket, i.e. the effort to add a sample is +// essentially multiplied by the number of age buckets. When rotating age +// buckets, we empty the previous head stream. On scrape time, we simply take +// the quantiles from the head stream (no merging required). Result: More effort +// on observation time, less effort on scrape time, which is exactly the +// opposite of what we try to accomplish, but at least the results are correct. +// +// The quite elegant previous contraption to merge the age buckets efficiently +// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) +// can't be used anymore. + +// NewSummary creates a new Summary based on the provided SummaryOpts. +func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if opts.Objectives == nil { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. 
+ + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. + s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. +func (s *summary) asyncFlush(now time.Time) { + s.mtx.Lock() + s.swapBufs(now) + + // Unblock the original goroutine that was responsible for the mutation + // that triggered the compaction. But hold onto the global non-buffer + // state mutex until the operation finishes. + go func() { + s.flushColdBuf() + s.mtx.Unlock() + }() +} + +// rotateStreams needs mtx AND bufMtx locked. +func (s *summary) maybeRotateStreams() { + for !s.hotBufExpTime.Equal(s.headStreamExpTime) { + s.headStream.Reset() + s.headStreamIdx++ + if s.headStreamIdx >= len(s.streams) { + s.headStreamIdx = 0 + } + s.headStream = s.streams[s.headStreamIdx] + s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) + } +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { + for _, v := range s.coldBuf { + for _, stream := range s.streams { + stream.Insert(v) + } + s.cnt++ + s.sum += v + } + s.coldBuf = s.coldBuf[0:0] + s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { + if len(s.coldBuf) != 0 { + panic("coldBuf is not empty") + } + s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf + // hotBuf is now empty and gets new expiration set. + for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. 
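Given the deprecation note on DefObjectives in the SummaryOpts comment above, a Summary is best created with explicit Objectives. A minimal, self-contained sketch (metric name illustrative, default registry assumed):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Explicit Objectives, since relying on DefObjectives is deprecated.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "example_request_duration_seconds",
		Help:       "Request latency in seconds.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(latency)

	for i := 0; i < 1000; i++ {
		latency.Observe(float64(i) / 1000) // stand-in for real observations
	}

	// The quantiles become visible when the registry gathers the metric.
	mfs, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		if mf.GetName() == "example_request_duration_seconds" {
			fmt.Println(mf.GetMetric()[0].GetSummary().GetQuantile())
		}
	}
}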
+type SummaryVec struct { + *metricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Summary for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Summary is created. Implications of +// creating a Summary without using it and keeping the Summary for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { + s, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return s +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *SummaryVec) With(labels Labels) Observer { + s, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return s +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the SummaryVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &SummaryVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. 
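As a hedged sketch of the custom-Collector use case the NewConstSummary comment above describes (collector, metric, and label names are invented for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// queueCollector exposes statistics that an external system already
// aggregates, emitting a throw-away const summary on every scrape.
type queueCollector struct {
	desc *prometheus.Desc
}

func (c queueCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
	// In a real collector these values would be read from the external
	// system on each scrape; they are hard-coded here for illustration.
	ch <- prometheus.MustNewConstSummary(
		c.desc,
		4711,  // observation count
		403.4, // observation sum
		map[float64]float64{0.5: 0.23, 0.99: 0.56}, // rank -> quantile value
		"default", // value for the "queue" variable label
	)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(queueCollector{
		desc: prometheus.NewDesc(
			"example_task_duration_seconds",
			"Task durations as aggregated by the external queue.",
			[]string{"queue"}, nil,
		),
	})
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	fmt.Println(mfs[0].GetMetric()[0].GetSummary().GetSampleCount())
}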
+func NewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstSummary is a version of NewConstSummary that panics where +// NewConstMetric would have returned an error. +func MustNewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) Metric { + m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/summary_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/summary_test.go new file mode 100644 index 00000000..b162ed94 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/summary_test.go @@ -0,0 +1,388 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "math/rand" + "sort" + "sync" + "testing" + "testing/quick" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func TestSummaryWithDefaultObjectives(t *testing.T) { + reg := NewRegistry() + summaryWithDefaultObjectives := NewSummary(SummaryOpts{ + Name: "default_objectives", + Help: "Test help.", + }) + if err := reg.Register(summaryWithDefaultObjectives); err != nil { + t.Error(err) + } + + m := &dto.Metric{} + if err := summaryWithDefaultObjectives.Write(m); err != nil { + t.Error(err) + } + if len(m.GetSummary().Quantile) != len(DefObjectives) { + t.Error("expected default objectives in summary") + } +} + +func TestSummaryWithoutObjectives(t *testing.T) { + reg := NewRegistry() + summaryWithEmptyObjectives := NewSummary(SummaryOpts{ + Name: "empty_objectives", + Help: "Test help.", + Objectives: map[float64]float64{}, + }) + if err := reg.Register(summaryWithEmptyObjectives); err != nil { + t.Error(err) + } + + m := &dto.Metric{} + if err := summaryWithEmptyObjectives.Write(m); err != nil { + t.Error(err) + } + if len(m.GetSummary().Quantile) != 0 { + t.Error("expected no objectives in summary") + } +} + +func benchmarkSummaryObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryObserve1(b *testing.B) { + benchmarkSummaryObserve(1, b) +} + +func BenchmarkSummaryObserve2(b *testing.B) { + benchmarkSummaryObserve(2, b) +} + +func BenchmarkSummaryObserve4(b *testing.B) { + benchmarkSummaryObserve(4, b) +} 
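For context on the vector API exercised by these tests, a short usage sketch of SummaryVec, including label currying via MustCurryWith (names are illustrative; default registry assumed):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	durations := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "example_http_duration_seconds",
			Help:       "HTTP handler latency in seconds.",
			Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
		},
		[]string{"handler", "method"},
	)
	prometheus.MustRegister(durations)

	// Direct use: panics on a label-count mismatch, hence the Must-style call.
	durations.WithLabelValues("/login", "GET").Observe(0.042)

	// Currying pre-binds one label; the returned ObserverVec only needs "method".
	loginOnly := durations.MustCurryWith(prometheus.Labels{"handler": "/login"})
	loginOnly.WithLabelValues("POST").Observe(0.108)
}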
+ +func BenchmarkSummaryObserve8(b *testing.B) { + benchmarkSummaryObserve(8, b) +} + +func benchmarkSummaryWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryWrite1(b *testing.B) { + benchmarkSummaryWrite(1, b) +} + +func BenchmarkSummaryWrite2(b *testing.B) { + benchmarkSummaryWrite(2, b) +} + +func BenchmarkSummaryWrite4(b *testing.B) { + benchmarkSummaryWrite(4, b) +} + +func BenchmarkSummaryWrite8(b *testing.B) { + benchmarkSummaryWrite(8, b) +} + +func TestSummaryConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Summary.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + for i, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[i].Quantile + gotV := *m.Summary.Quantile[i].Value + min, max := getBounds(allVars, wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f, want %f", gotQ, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummaryVec( + SummaryOpts{ + Name: "test_summary", + Help: "helpless", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := 
make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sum.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := sum.WithLabelValues(string('A' + i)) + s.(Summary).Write(m) + if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) + } + if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want) + } + for j, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[j].Quantile + gotV := *m.Summary.Quantile[j].Value + min, max := getBounds(allVars[i], wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryDecay(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + // More because it depends on timing than because it is particularly long... + } + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + MaxAge: 100 * time.Millisecond, + Objectives: map[float64]float64{0.1: 0.001}, + AgeBuckets: 10, + }) + + m := &dto.Metric{} + i := 0 + tick := time.NewTicker(time.Millisecond) + for range tick.C { + i++ + sum.Observe(float64(i)) + if i%10 == 0 { + sum.Write(m) + if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 { + t.Errorf("%d. got %f, want %f", i, got, want) + } + m.Reset() + } + if i >= 1000 { + break + } + } + tick.Stop() + // Wait for MaxAge without observations and make sure quantiles are NaN. + time.Sleep(100 * time.Millisecond) + sum.Write(m) + if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) { + t.Errorf("got %f, want NaN after expiration", got) + } +} + +func getBounds(vars []float64, q, ε float64) (min, max float64) { + // TODO(beorn7): This currently tolerates an error of up to 2*ε. The + // error must be at most ε, but for some reason, it's sometimes slightly + // higher. That's a bug. + n := float64(len(vars)) + lower := int((q - 2*ε) * n) + upper := int(math.Ceil((q + 2*ε) * n)) + min = vars[0] + if lower > 1 { + min = vars[lower-1] + } + max = vars[len(vars)-1] + if upper < len(vars) { + max = vars[upper-1] + } + return +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/timer.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/timer.go new file mode 100644 index 00000000..b8fc5f18 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/timer.go @@ -0,0 +1,51 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. ObserveDuration is +// usually called with a defer statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() { + if t.observer != nil { + t.observer.Observe(time.Since(t.begin).Seconds()) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/timer_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/timer_test.go new file mode 100644 index 00000000..29490206 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/timer_test.go @@ -0,0 +1,152 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
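Before the timer tests, a small sketch of the Timer type defined in timer.go above, wired into an HTTP handler and exposed via promhttp (handler path, metric name, and port are illustrative):

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var handlerDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "example_handler_duration_seconds",
		Help:    "Handler latency in seconds.",
		Buckets: prometheus.DefBuckets,
	},
	[]string{"handler"},
)

func init() { prometheus.MustRegister(handlerDuration) }

func slowHandler(w http.ResponseWriter, r *http.Request) {
	// One Timer per request; the deferred ObserveDuration records the elapsed
	// time into the histogram when the handler returns.
	timer := prometheus.NewTimer(handlerDuration.WithLabelValues("/slow"))
	defer timer.ObserveDuration()

	time.Sleep(50 * time.Millisecond)
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/slow", slowHandler)
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":9100", nil)
}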
+ +package prometheus + +import ( + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestTimerObserve(t *testing.T) { + var ( + his = NewHistogram(HistogramOpts{Name: "test_histogram"}) + sum = NewSummary(SummaryOpts{Name: "test_summary"}) + gauge = NewGauge(GaugeOpts{Name: "test_gauge"}) + ) + + func() { + hisTimer := NewTimer(his) + sumTimer := NewTimer(sum) + gaugeTimer := NewTimer(ObserverFunc(gauge.Set)) + defer hisTimer.ObserveDuration() + defer sumTimer.ObserveDuration() + defer gaugeTimer.ObserveDuration() + }() + + m := &dto.Metric{} + his.Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for histogram, got %d", want, got) + } + m.Reset() + sum.Write(m) + if want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got { + t.Errorf("want %d observations for summary, got %d", want, got) + } + m.Reset() + gauge.Write(m) + if got := m.GetGauge().GetValue(); got <= 0 { + t.Errorf("want value > 0 for gauge, got %f", got) + } +} + +func TestTimerEmpty(t *testing.T) { + emptyTimer := NewTimer(nil) + emptyTimer.ObserveDuration() + // Do nothing, just demonstrate it works without panic. +} + +func TestTimerConditionalTiming(t *testing.T) { + var ( + his = NewHistogram(HistogramOpts{ + Name: "test_histogram", + }) + timeMe = true + m = &dto.Metric{} + ) + + timedFunc := func() { + timer := NewTimer(ObserverFunc(func(v float64) { + if timeMe { + his.Observe(v) + } + })) + defer timer.ObserveDuration() + } + + timedFunc() // This will time. + his.Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for histogram, got %d", want, got) + } + + timeMe = false + timedFunc() // This will not time again. 
+ m.Reset() + his.Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for histogram, got %d", want, got) + } +} + +func TestTimerByOutcome(t *testing.T) { + var ( + his = NewHistogramVec( + HistogramOpts{Name: "test_histogram"}, + []string{"outcome"}, + ) + outcome = "foo" + m = &dto.Metric{} + ) + + timedFunc := func() { + timer := NewTimer(ObserverFunc(func(v float64) { + his.WithLabelValues(outcome).Observe(v) + })) + defer timer.ObserveDuration() + + if outcome == "foo" { + outcome = "bar" + return + } + outcome = "foo" + } + + timedFunc() + his.WithLabelValues("foo").(Histogram).Write(m) + if want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) + } + m.Reset() + his.WithLabelValues("bar").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) + } + + timedFunc() + m.Reset() + his.WithLabelValues("foo").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) + } + m.Reset() + his.WithLabelValues("bar").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) + } + + timedFunc() + m.Reset() + his.WithLabelValues("foo").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) + } + m.Reset() + his.WithLabelValues("bar").(Histogram).Write(m) + if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) + } + +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/untyped.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/untyped.go new file mode 100644 index 00000000..0f9ce63f --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. 
If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/value.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/value.go new file mode 100644 index 00000000..9fb7eab0 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/value.go @@ -0,0 +1,158 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. 
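A brief sketch of the custom-Collector pattern the NewConstMetric comment above describes (collector, metric, and label names are invented; only the vendored API is assumed):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// diskCollector mirrors a value owned by another system (here a pretend
// disk-usage probe) as a gauge, creating a throw-away const metric per scrape.
type diskCollector struct {
	usedBytes *prometheus.Desc
}

func (c diskCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.usedBytes }

func (c diskCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(
		c.usedBytes,
		prometheus.GaugeValue,
		123456789, // value read from the external system at scrape time
		"/data",   // value for the "mount" variable label
	)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(diskCollector{
		usedBytes: prometheus.NewDesc(
			"example_disk_used_bytes",
			"Bytes used on the mount, as reported by an external probe.",
			[]string{"mount"}, nil,
		),
	})
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	fmt.Println(mfs[0].GetMetric()[0].GetGauge().GetValue())
}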
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + labelPairs = append(labelPairs, desc.constLabelPairs...) 
+ sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/value_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/value_test.go new file mode 100644 index 00000000..eed517e7 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/value_test.go @@ -0,0 +1,43 @@ +package prometheus + +import ( + "fmt" + "testing" +) + +func TestNewConstMetricInvalidLabelValues(t *testing.T) { + testCases := []struct { + desc string + labels Labels + }{ + { + desc: "non utf8 label value", + labels: Labels{"a": "\xFF"}, + }, + { + desc: "not enough label values", + labels: Labels{}, + }, + { + desc: "too many label values", + labels: Labels{"a": "1", "b": "2"}, + }, + } + + for _, test := range testCases { + metricDesc := NewDesc( + "sample_value", + "sample value", + []string{"a"}, + Labels{}, + ) + + expectPanic(t, func() { + MustNewConstMetric(metricDesc, CounterValue, 0.3, "\xFF") + }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) + + if _, err := NewConstMetric(metricDesc, CounterValue, 0.3, "\xFF"); err == nil { + t.Errorf("NewConstMetric: expected error because: %s", test.desc) + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/vec.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/vec.go new file mode 100644 index 00000000..14ed9e85 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/vec.go @@ -0,0 +1,472 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. +// It also handles label currying. It uses basicMetricVec internally. +type metricVec struct { + *metricMap + + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. + hashAdd func(h uint64, s string) uint64 + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized metricVec. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { + return &metricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. 
+// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. 
+ } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. 
+func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + + i := findMetricWithLabelValues(metrics, lvs, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + i := findMetricWithLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) + if !ok { + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) + if !ok { + lvs := extractLabelValues(m.desc, labels, curry) + metric = m.newMetric(lvs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. +func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithHashAndLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. 
+func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/vec_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/vec_test.go new file mode 100644 index 00000000..bd18a9f4 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/prometheus/vec_test.go @@ -0,0 +1,535 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestDelete(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testDelete(t, vec) +} + +func TestDeleteWithCollisions(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testDelete(t, vec) +} + +func testDelete(t *testing.T, vec *GaugeVec) { + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestDeleteLabelValues(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testDeleteLabelValues(t, vec) +} + +func TestDeleteLabelValuesWithCollisions(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testDeleteLabelValues(t, vec) +} + +func testDeleteLabelValues(t *testing.T, vec *GaugeVec) { + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + vec.With(Labels{"l1": "v1", "l2": "v3"}).(Gauge).Set(42) // Add junk data for collision. + if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + // Delete out of order. 
+ if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestMetricVec(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testMetricVec(t, vec) +} + +func TestMetricVecWithCollisions(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testMetricVec(t, vec) +} + +func testMetricVec(t *testing.T, vec *GaugeVec) { + vec.Reset() // Actually test Reset now! + + var pair [2]string + // Keep track of metrics. + expected := map[[2]string]int{} + + for i := 0; i < 1000; i++ { + pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples. + expected[pair]++ + vec.WithLabelValues(pair[0], pair[1]).Inc() + + expected[[2]string{"v1", "v2"}]++ + vec.WithLabelValues("v1", "v2").(Gauge).Inc() + } + + var total int + for _, metrics := range vec.metricMap.metrics { + for _, metric := range metrics { + total++ + copy(pair[:], metric.values) + + var metricOut dto.Metric + if err := metric.metric.Write(&metricOut); err != nil { + t.Fatal(err) + } + actual := *metricOut.Gauge.Value + + var actualPair [2]string + for i, label := range metricOut.Label { + actualPair[i] = *label.Value + } + + // Test output pair against metric.values to ensure we've selected + // the right one. We check this to ensure the below check means + // anything at all. + if actualPair != pair { + t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair) + } + + if actual != float64(expected[pair]) { + t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair]) + } + } + } + + if total != len(expected) { + t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected)) + } + + vec.Reset() + + if len(vec.metricMap.metrics) > 0 { + t.Fatalf("reset failed") + } +} + +func TestCounterVecEndToEndWithCollision(t *testing.T) { + vec := NewCounterVec( + CounterOpts{ + Name: "test", + Help: "helpless", + }, + []string{"labelname"}, + ) + vec.WithLabelValues("77kepQFQ8Kl").Inc() + vec.WithLabelValues("!0IC=VloaY").Add(2) + + m := &dto.Metric{} + if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil { + t.Fatal(err) + } + if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want { + t.Errorf("got label value %q, want %q", got, want) + } + if got, want := m.GetCounter().GetValue(), 1.; got != want { + t.Errorf("got value %f, want %f", got, want) + } + m.Reset() + if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil { + t.Fatal(err) + } + if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want { + t.Errorf("got label value %q, want %q", got, want) + } + if got, want := m.GetCounter().GetValue(), 2.; got != want { + t.Errorf("got value %f, want %f", got, want) + } +} + +func TestCurryVec(t *testing.T) { + vec := NewCounterVec( + CounterOpts{ + Name: "test", + Help: "helpless", + }, + []string{"one", "two", "three"}, + ) + testCurryVec(t, vec) +} + +func TestCurryVecWithCollisions(t *testing.T) { + vec := NewCounterVec( + CounterOpts{ + Name: "test", + Help: "helpless", + }, + []string{"one", "two", "three"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + 
vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testCurryVec(t, vec) +} + +func testCurryVec(t *testing.T, vec *CounterVec) { + + assertMetrics := func(t *testing.T) { + n := 0 + for _, m := range vec.metricMap.metrics { + n += len(m) + } + if n != 2 { + t.Error("expected two metrics, got", n) + } + m := &dto.Metric{} + c1, err := vec.GetMetricWithLabelValues("1", "2", "3") + if err != nil { + t.Fatal("unexpected error getting metric:", err) + } + c1.Write(m) + if want, got := 1., m.GetCounter().GetValue(); want != got { + t.Errorf("want %f as counter value, got %f", want, got) + } + m.Reset() + c2, err := vec.GetMetricWithLabelValues("11", "22", "33") + if err != nil { + t.Fatal("unexpected error getting metric:", err) + } + c2.Write(m) + if want, got := 1., m.GetCounter().GetValue(); want != got { + t.Errorf("want %f as counter value, got %f", want, got) + } + } + + assertNoMetric := func(t *testing.T) { + if n := len(vec.metricMap.metrics); n != 0 { + t.Error("expected no metrics, got", n) + } + } + + t.Run("zero labels", func(t *testing.T) { + c1 := vec.MustCurryWith(nil) + c2 := vec.MustCurryWith(nil) + c1.WithLabelValues("1", "2", "3").Inc() + c2.With(Labels{"one": "11", "two": "22", "three": "33"}).Inc() + assertMetrics(t) + if !c1.Delete(Labels{"one": "1", "two": "2", "three": "3"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("11", "22", "33") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("first label", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"one": "1"}) + c2 := vec.MustCurryWith(Labels{"one": "11"}) + c1.WithLabelValues("2", "3").Inc() + c2.With(Labels{"two": "22", "three": "33"}).Inc() + assertMetrics(t) + if c1.Delete(Labels{"two": "22", "three": "33"}) { + t.Error("deletion unexpectedly succeeded") + } + if c2.DeleteLabelValues("2", "3") { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"two": "2", "three": "3"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("22", "33") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("middle label", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"two": "2"}) + c2 := vec.MustCurryWith(Labels{"two": "22"}) + c1.WithLabelValues("1", "3").Inc() + c2.With(Labels{"one": "11", "three": "33"}).Inc() + assertMetrics(t) + if c1.Delete(Labels{"one": "11", "three": "33"}) { + t.Error("deletion unexpectedly succeeded") + } + if c2.DeleteLabelValues("1", "3") { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"one": "1", "three": "3"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("11", "33") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("last label", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"three": "3"}) + c2 := vec.MustCurryWith(Labels{"three": "33"}) + c1.WithLabelValues("1", "2").Inc() + c2.With(Labels{"one": "11", "two": "22"}).Inc() + assertMetrics(t) + if c1.Delete(Labels{"two": "22", "one": "11"}) { + t.Error("deletion unexpectedly succeeded") + } + if c2.DeleteLabelValues("1", "2") { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"two": "2", "one": "1"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("11", "22") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("two labels", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"three": "3", "one": "1"}) + c2 := vec.MustCurryWith(Labels{"three": "33", "one": "11"}) + c1.WithLabelValues("2").Inc() + c2.With(Labels{"two": "22"}).Inc() + 
assertMetrics(t) + if c1.Delete(Labels{"two": "22"}) { + t.Error("deletion unexpectedly succeeded") + } + if c2.DeleteLabelValues("2") { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"two": "2"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("22") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("all labels", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"three": "3", "two": "2", "one": "1"}) + c2 := vec.MustCurryWith(Labels{"three": "33", "one": "11", "two": "22"}) + c1.WithLabelValues().Inc() + c2.With(nil).Inc() + assertMetrics(t) + if !c1.Delete(Labels{}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues() { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("double curry", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"three": "3"}).MustCurryWith(Labels{"one": "1"}) + c2 := vec.MustCurryWith(Labels{"three": "33"}).MustCurryWith(Labels{"one": "11"}) + c1.WithLabelValues("2").Inc() + c2.With(Labels{"two": "22"}).Inc() + assertMetrics(t) + if c1.Delete(Labels{"two": "22"}) { + t.Error("deletion unexpectedly succeeded") + } + if c2.DeleteLabelValues("2") { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"two": "2"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("22") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("use already curried label", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"three": "3"}) + if _, err := c1.GetMetricWithLabelValues("1", "2", "3"); err == nil { + t.Error("expected error when using already curried label") + } + if _, err := c1.GetMetricWith(Labels{"one": "1", "two": "2", "three": "3"}); err == nil { + t.Error("expected error when using already curried label") + } + assertNoMetric(t) + c1.WithLabelValues("1", "2").Inc() + if c1.Delete(Labels{"one": "1", "two": "2", "three": "3"}) { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"one": "1", "two": "2"}) { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("curry already curried label", func(t *testing.T) { + if _, err := vec.MustCurryWith(Labels{"three": "3"}).CurryWith(Labels{"three": "33"}); err == nil { + t.Error("currying unexpectedly succeeded") + } else if err.Error() != `label name "three" is already curried` { + t.Error("currying returned unexpected error:", err) + } + + }) + t.Run("unknown label", func(t *testing.T) { + if _, err := vec.CurryWith(Labels{"foo": "bar"}); err == nil { + t.Error("currying unexpectedly succeeded") + } else if err.Error() != "1 unknown label(s) found during currying" { + t.Error("currying returned unexpected error:", err) + } + }) +} + +func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) { + benchmarkMetricVecWithLabelValues(b, map[string][]string{ + "l1": {"onevalue"}, + "l2": {"twovalue"}, + }) +} + +func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10) +} + +func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10) +} + +func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100) +} + +func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100) +} + +func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) { + 
benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000) +} + +func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) { + labels := map[string][]string{} + + for i := 0; i < nkeys; i++ { + var ( + k = fmt.Sprintf("key-%v", i) + vs = make([]string, 0, nvalues) + ) + for j := 0; j < nvalues; j++ { + vs = append(vs, fmt.Sprintf("value-%v", j)) + } + labels[k] = vs + } + + benchmarkMetricVecWithLabelValues(b, labels) +} + +func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) { + var keys []string + for k := range labels { // Map order dependent, who cares though. + keys = append(keys, k) + } + + values := make([]string, len(labels)) // Value cache for permutations. + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + keys, + ) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + // Varies input across provide map entries based on key size. + for j, k := range keys { + candidates := labels[k] + values[j] = candidates[i%len(candidates)] + } + + vec.WithLabelValues(values...) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/CHANGELOG.md b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/CHANGELOG.md new file mode 100644 index 00000000..330788a4 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/CHANGELOG.md @@ -0,0 +1,109 @@ +## 0.8.0 / 2016-08-17 +* [CHANGE] Registry is doing more consistency checks. This might break + existing setups that used to export inconsistent metrics. +* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow + arbitrary grouping. +* [CHANGE] Removed `SelfCollector`. +* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods. +* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`, + `extraction`. +* [CHANGE] Deprecated a number of functions. +* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer` + interfaces. +* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package + `promhttp`) and enabling the creation of other exposition mechanisms. +* [FEATURE] `MustRegister` is variadic now, allowing registration of many + collectors in one call. +* [FEATURE] Added HTTP API v1 package. +* [ENHANCEMENT] Numerous documentation improvements. +* [ENHANCEMENT] Improved metric sorting. +* [ENHANCEMENT] Inlined fnv64a hashing for improved performance. +* [ENHANCEMENT] Several test improvements. +* [BUGFIX] Handle collisions in MetricVec. + +## 0.7.0 / 2015-07-27 +* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix. +* [BUGFIX] Closed gaps in metric consistency check. +* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling. +* [ENHANCEMENT] Document the possibility to create "empty" metrics in + a metric vector. +* [ENHANCEMENT] Fix and clarify various doc comments and the README.md. +* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler. +* [ENHANCEMENT] Change responseWriterDelegator.written to int64. + +## 0.6.0 / 2015-06-01 +* [CHANGE] Rename process_goroutines to go_goroutines. +* [ENHANCEMENT] Validate label names during YAML decoding. +* [ENHANCEMENT] Add LabelName regular expression. +* [BUGFIX] Ensure alignment of struct members for 32-bit systems. + +## 0.5.0 / 2015-05-06 +* [BUGFIX] Removed a weakness in the fingerprinting aka signature code. + This makes fingerprinting slower and more allocation-heavy, but the + weakness was too severe to be tolerated. 
+* [CHANGE] As a result of the above, Metric.Fingerprint is now returning + a different fingerprint. To keep the same fingerprint, the new method + Metric.FastFingerprint was introduced, which will be used by the + Prometheus server for storage purposes (implying that a collision + detection has to be added, too). +* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on + fingerprinting anymore, removing the possibility of an undetected + fingerprint collision. +* [FEATURE] The Go collector in the exposition library includes garbage + collection stats. +* [FEATURE] The exposition library allows to create constant "throw-away" + summaries and histograms. +* [CHANGE] A number of new reserved labels and prefixes. + +## 0.4.0 / 2015-04-08 +* [CHANGE] Return NaN when Summaries have no observations yet. +* [BUGFIX] Properly handle Summary decay upon Write(). +* [BUGFIX] Fix the documentation link to the consumption library. +* [FEATURE] Allow the metric family injection hook to merge with existing + metric families. +* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs. +* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions. + +## 0.3.2 / 2015-03-11 +* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is + only used by the Prometheus server internally. +* [CLEANUP] Added licenses of vendored code left out by godep. + +## 0.3.1 / 2015-03-04 +* [ENHANCEMENT] Switched fingerprinting functions from own free list to + sync.Pool. +* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests). + +## 0.3.0 / 2015-03-03 +* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL + PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS + VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE. +* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was + arguably broken.) +* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If + client_golang is used as a library, the vendoring will stay out of your way. +* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made + the fingerprinting change above necessary.) +* [FEATURE] Added new fingerprinting functions SignatureForLabels and + SignatureWithoutLabels to be used by the Prometheus server. These functions + require fewer allocations than the ones currently used by the server. + +## 0.2.0 / 2015-02-23 +* [FEATURE] Introduce new Histagram metric type. +* [CHANGE] Ignore process collector errors for now (better error handling + pending). +* [CHANGE] Use clear error interface for process pidFn. +* [BUGFIX] Fix Go download links for several archs and OSes. +* [ENHANCEMENT] Massively improve Gauge and Counter performance. +* [ENHANCEMENT] Catch illegal label names for summaries in histograms. +* [ENHANCEMENT] Reduce allocations during fingerprinting. +* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if + both cgo is available and the build is for an OS with procfs. +* [CLEANUP] Clean up code style issues. +* [CLEANUP] Mark slow test as such and exclude them from travis. +* [CLEANUP] Update protobuf library package name. +* [CLEANUP] Updated vendoring of beorn7/perks. + +## 0.1.0 / 2015-02-02 +* [CLEANUP] Introduced semantic versioning and changelog. From now on, + changes will be reported in this file. 
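[editor's note] The 0.8.0 entries in the changelog above describe the API surface this vendoring targets: custom registries via the `Registerer`/`Gatherer` interfaces, HTTP exposition split out into the `promhttp` package, and a variadic `MustRegister`. A minimal sketch of how those pieces fit together is shown below; the metric names and listen address are illustrative only and are not taken from this changeset.

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A private Registry instead of the package-level default registry.
	reg := prometheus.NewRegistry()

	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_requests_total", // hypothetical metric name
		Help: "Requests handled by this example.",
	})
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "example_requests_in_flight", // hypothetical metric name
		Help: "Requests currently being handled.",
	})

	// MustRegister is variadic, so several collectors register in one call.
	reg.MustRegister(requests, inFlight)

	// promhttp can expose an arbitrary Gatherer, not just the default registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9100", nil))
}
```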
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/CONTRIBUTING.md b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/CONTRIBUTING.md new file mode 100644 index 00000000..40503edb --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/ISSUE_TEMPLATE.md b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/ISSUE_TEMPLATE.md new file mode 100644 index 00000000..a456acf5 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/ISSUE_TEMPLATE.md @@ -0,0 +1,8 @@ + diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/LICENSE b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/MAINTAINERS.md b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/MAINTAINERS.md new file mode 100644 index 00000000..3ede55fe --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/MAINTAINERS.md @@ -0,0 +1 @@ +* Björn Rabenstein diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/NOTICE b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/NOTICE new file mode 100644 index 00000000..dd878a30 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). 
+ + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/README.md b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/README.md new file mode 100644 index 00000000..17d46eca --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/README.md @@ -0,0 +1,94 @@ +# Prometheus Go client library + +[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang) +[![go-doc](https://godoc.org/github.com/prometheus/client_golang?status.svg)](https://godoc.org/github.com/prometheus/client_golang) + +This is the [Go](http://golang.org) client library for +[Prometheus](http://prometheus.io). It has two separate parts, one for +instrumenting application code, and one for creating clients that talk to the +Prometheus HTTP API. + +__This library requires Go1.7 or later.__ + +## Important note about releases, versioning, tagging, stability, and your favorite Go dependency management tool + +While our goal is to follow [Semantic Versioning](https://semver.org/), this +repository is still pre-1.0.0. To quote the +[Semantic Versioning spec](https://semver.org/#spec-item-4): “Anything may +change at any time. The public API should not be considered stable.” We know +that this is at odds with the widespread use of this library. However, just +declaring something 1.0.0 doesn't make it 1.0.0. Instead, we are working +towards a 1.0.0 release that actually deserves its major version number. + +Having said that, we aim for always keeping the tip of master in a workable +state and for only introducing ”mildly” breaking changes up to and including +[v0.9.0](https://github.com/prometheus/client_golang/milestone/1). After that, +a number of ”hard” breaking changes are planned, see the +[v0.10.0 milestone](https://github.com/prometheus/client_golang/milestone/2), +which should get the library much closer to 1.0.0 state. + +Dependency management in Go projects is still in flux, and there are many tools +floating around. While [dep](https://golang.github.io/dep/) might develop into +the de-facto standard tool, it is still officially experimental. The roadmap +for this library has been laid out with a lot of sometimes painful experience +in mind. We really cannot adjust it every other month to the needs of the +currently most popular or most promising Go dependency management tool. The +recommended course of action with dependency management tools is the following: + +- Do not expect strict post-1.0.0 semver semantics prior to the 1.0.0 + release. If your dependency management tool expects strict post-1.0.0 semver + semantics, you have to wait. Sorry. 
+- If you want absolute certainty, please lock to a specific commit. You can + also lock to tags, but please don't ask for more tagging. This would suggest + some release or stability testing procedure that simply is not in place. As + said above, we are aiming for stability of the tip of master, but if we + tagged every single commit, locking to tags would be the same as locking to + commits. +- If you want to get the newer features and improvements and are willing to + take the minor risk of newly introduced bugs and “mild” breakage, just always + update to the tip of master (which is essentially the original idea of Go + dependency management). We recommend to not use features marked as + _deprecated_ in this case. +- Once [v0.9.0](https://github.com/prometheus/client_golang/milestone/1) is + out, you could lock to v0.9.x to get bugfixes (and perhaps minor new + features) while avoiding the “hard” breakage that will come with post-0.9 + features. + +## Instrumenting applications + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) + +The +[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) +contains the instrumentation library. See the +[best practices section](http://prometheus.io/docs/practices/naming/) of the +Prometheus documentation to learn more about instrumenting applications. + +The +[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) +contains simple examples of instrumented code. + +## Client for the Prometheus HTTP API + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus/v1)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus/v1) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api) + +The +[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) +contains the client for the +[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you +to write Go applications that query time series data from a Prometheus +server. It is still in alpha stage. + +## Where is `model`, `extraction`, and `text`? + +The `model` packages has been moved to +[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model). + +The `extraction` and `text` packages are now contained in +[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt). + +## Contributing and community + +See the [contributing guidelines](CONTRIBUTING.md) and the +[Community section](http://prometheus.io/community/) of the homepage. 
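[editor's note] The README's "Instrumenting applications" pointer and the `NewConstMetric`/`MustNewConstMetric` comments in the vendored value.go meet in the custom-Collector pattern: a Collector builds throw-away const metrics on the fly inside Collect. The sketch below is illustrative only; the collector type, metric name, label, and value source are hypothetical and not part of this changeset.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

// queueCollector is a hypothetical custom Collector: on every scrape it emits
// the current queue depth as a throw-away const metric.
type queueCollector struct {
	depthDesc *prometheus.Desc
	depth     func() float64 // hypothetical source of the value
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depthDesc
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// One label value for the one variable label in the Desc; MustNewConstMetric
	// panics on a mismatch, where NewConstMetric would return an error.
	ch <- prometheus.MustNewConstMetric(c.depthDesc, prometheus.GaugeValue, c.depth(), "default")
}

func main() {
	c := &queueCollector{
		depthDesc: prometheus.NewDesc(
			"example_queue_depth",              // hypothetical metric name
			"Current depth of the work queue.", // help text
			[]string{"queue"},                  // one variable label
			nil,                                // no constant labels
		),
		depth: func() float64 { return 42 },
	}
	prometheus.MustRegister(c)

	// Gather once from the default registry to show the collector's output.
	mfs, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		log.Fatal(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}
```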
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/VERSION b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/VERSION new file mode 100644 index 00000000..a3df0a69 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/VERSION @@ -0,0 +1 @@ +0.8.0 diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/client.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/client.go new file mode 100644 index 00000000..bf267246 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/client.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +// Package api provides clients for the HTTP APIs. +package api + +import ( + "context" + "io/ioutil" + "net" + "net/http" + "net/url" + "path" + "strings" + "time" +) + +// DefaultRoundTripper is used if no RoundTripper is set in Config. +var DefaultRoundTripper http.RoundTripper = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +// Config defines configuration parameters for a new client. +type Config struct { + // The address of the Prometheus to connect to. + Address string + + // RoundTripper is used by the Client to drive HTTP requests. If not + // provided, DefaultRoundTripper will be used. + RoundTripper http.RoundTripper +} + +func (cfg *Config) roundTripper() http.RoundTripper { + if cfg.RoundTripper == nil { + return DefaultRoundTripper + } + return cfg.RoundTripper +} + +// Client is the interface for an API client. +type Client interface { + URL(ep string, args map[string]string) *url.URL + Do(context.Context, *http.Request) (*http.Response, []byte, error) +} + +// NewClient returns a new Client. +// +// It is safe to use the returned Client from multiple goroutines. 
+func NewClient(cfg Config) (Client, error) { + u, err := url.Parse(cfg.Address) + if err != nil { + return nil, err + } + u.Path = strings.TrimRight(u.Path, "/") + + return &httpClient{ + endpoint: u, + client: http.Client{Transport: cfg.roundTripper()}, + }, nil +} + +type httpClient struct { + endpoint *url.URL + client http.Client +} + +func (c *httpClient) URL(ep string, args map[string]string) *url.URL { + p := path.Join(c.endpoint.Path, ep) + + for arg, val := range args { + arg = ":" + arg + p = strings.Replace(p, arg, val, -1) + } + + u := *c.endpoint + u.Path = p + + return &u +} + +func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + if ctx != nil { + req = req.WithContext(ctx) + } + resp, err := c.client.Do(req) + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + close(done) + }() + + select { + case <-ctx.Done(): + err = resp.Body.Close() + <-done + if err == nil { + err = ctx.Err() + } + case <-done: + } + + return resp, body, err +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/client_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/client_test.go new file mode 100644 index 00000000..b1bcfc9b --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/client_test.go @@ -0,0 +1,115 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +package api + +import ( + "net/http" + "net/url" + "testing" +) + +func TestConfig(t *testing.T) { + c := Config{} + if c.roundTripper() != DefaultRoundTripper { + t.Fatalf("expected default roundtripper for nil RoundTripper field") + } +} + +func TestClientURL(t *testing.T) { + tests := []struct { + address string + endpoint string + args map[string]string + expected string + }{ + { + address: "http://localhost:9090", + endpoint: "/test", + expected: "http://localhost:9090/test", + }, + { + address: "http://localhost", + endpoint: "/test", + expected: "http://localhost/test", + }, + { + address: "http://localhost:9090", + endpoint: "test", + expected: "http://localhost:9090/test", + }, + { + address: "http://localhost:9090/prefix", + endpoint: "/test", + expected: "http://localhost:9090/prefix/test", + }, + { + address: "https://localhost:9090/", + endpoint: "/test/", + expected: "https://localhost:9090/test", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param", + args: map[string]string{ + "param": "content", + }, + expected: "http://localhost:9090/test/content", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param/more/:param", + args: map[string]string{ + "param": "content", + }, + expected: "http://localhost:9090/test/content/more/content", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param/more/:foo", + args: map[string]string{ + "param": "content", + "foo": "bar", + }, + expected: "http://localhost:9090/test/content/more/bar", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param", + args: map[string]string{ + "nonexistent": "content", + }, + expected: "http://localhost:9090/test/:param", + }, + } + + for _, test := range tests { + ep, err := url.Parse(test.address) + if err != nil { + t.Fatal(err) + } + + hclient := &httpClient{ + endpoint: ep, + client: http.Client{Transport: DefaultRoundTripper}, + } + + u := hclient.URL(test.endpoint, test.args) + if u.String() != test.expected { + t.Errorf("unexpected result: got %s, want %s", u, test.expected) + continue + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/prometheus/v1/api.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/prometheus/v1/api.go new file mode 100644 index 00000000..c41c0cf5 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/prometheus/v1/api.go @@ -0,0 +1,497 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +// Package v1 provides bindings to the Prometheus HTTP API v1: +// http://prometheus.io/docs/querying/api/ +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/prometheus/client_golang/api" + "github.com/prometheus/common/model" +) + +const ( + statusAPIError = 422 + + apiPrefix = "/api/v1" + + epAlertManagers = apiPrefix + "/alertmanagers" + epQuery = apiPrefix + "/query" + epQueryRange = apiPrefix + "/query_range" + epLabelValues = apiPrefix + "/label/:name/values" + epSeries = apiPrefix + "/series" + epTargets = apiPrefix + "/targets" + epSnapshot = apiPrefix + "/admin/tsdb/snapshot" + epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series" + epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones" + epConfig = apiPrefix + "/status/config" + epFlags = apiPrefix + "/status/flags" +) + +// ErrorType models the different API error types. +type ErrorType string + +// HealthStatus models the health status of a scrape target. +type HealthStatus string + +const ( + // Possible values for ErrorType. + ErrBadData ErrorType = "bad_data" + ErrTimeout = "timeout" + ErrCanceled = "canceled" + ErrExec = "execution" + ErrBadResponse = "bad_response" + + // Possible values for HealthStatus. + HealthGood HealthStatus = "up" + HealthUnknown HealthStatus = "unknown" + HealthBad HealthStatus = "down" +) + +// Error is an error returned by the API. +type Error struct { + Type ErrorType + Msg string +} + +func (e *Error) Error() string { + return fmt.Sprintf("%s: %s", e.Type, e.Msg) +} + +// Range represents a sliced time range. +type Range struct { + // The boundaries of the time range. + Start, End time.Time + // The maximum time between two slices within the boundaries. + Step time.Duration +} + +// API provides bindings for Prometheus's v1 API. +type API interface { + // AlertManagers returns an overview of the current state of the Prometheus alert manager discovery. + AlertManagers(ctx context.Context) (AlertManagersResult, error) + // CleanTombstones removes the deleted data from disk and cleans up the existing tombstones. + CleanTombstones(ctx context.Context) error + // Config returns the current Prometheus configuration. + Config(ctx context.Context) (ConfigResult, error) + // DeleteSeries deletes data for a selection of series in a time range. + DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error + // Flags returns the flag values that Prometheus was launched with. + Flags(ctx context.Context) (FlagsResult, error) + // LabelValues performs a query for the values of the given label. + LabelValues(ctx context.Context, label string) (model.LabelValues, error) + // Query performs a query for the given time. + Query(ctx context.Context, query string, ts time.Time) (model.Value, error) + // QueryRange performs a query for the given range. + QueryRange(ctx context.Context, query string, r Range) (model.Value, error) + // Series finds series by label matchers. + Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error) + // Snapshot creates a snapshot of all current data into snapshots/- + // under the TSDB's data directory and returns the directory as response. + Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) + // Targets returns an overview of the current state of the Prometheus target discovery. 
+ Targets(ctx context.Context) (TargetsResult, error) +} + +// AlertManagersResult contains the result from querying the alertmanagers endpoint. +type AlertManagersResult struct { + Active []AlertManager `json:"activeAlertManagers"` + Dropped []AlertManager `json:"droppedAlertManagers"` +} + +// AlertManager models a configured Alert Manager. +type AlertManager struct { + URL string `json:"url"` +} + +// ConfigResult contains the result from querying the config endpoint. +type ConfigResult struct { + YAML string `json:"yaml"` +} + +// FlagsResult contains the result from querying the flag endpoint. +type FlagsResult map[string]string + +// SnapshotResult contains the result from querying the snapshot endpoint. +type SnapshotResult struct { + Name string `json:"name"` +} + +// TargetsResult contains the result from querying the targets endpoint. +type TargetsResult struct { + Active []ActiveTarget `json:"activeTargets"` + Dropped []DroppedTarget `json:"droppedTargets"` +} + +// ActiveTarget models an active Prometheus scrape target. +type ActiveTarget struct { + DiscoveredLabels model.LabelSet `json:"discoveredLabels"` + Labels model.LabelSet `json:"labels"` + ScrapeURL string `json:"scrapeUrl"` + LastError string `json:"lastError"` + LastScrape time.Time `json:"lastScrape"` + Health HealthStatus `json:"health"` +} + +// DroppedTarget models a dropped Prometheus scrape target. +type DroppedTarget struct { + DiscoveredLabels model.LabelSet `json:"discoveredLabels"` +} + +// queryResult contains result data for a query. +type queryResult struct { + Type model.ValueType `json:"resultType"` + Result interface{} `json:"result"` + + // The decoded value. + v model.Value +} + +func (qr *queryResult) UnmarshalJSON(b []byte) error { + v := struct { + Type model.ValueType `json:"resultType"` + Result json.RawMessage `json:"result"` + }{} + + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + + switch v.Type { + case model.ValScalar: + var sv model.Scalar + err = json.Unmarshal(v.Result, &sv) + qr.v = &sv + + case model.ValVector: + var vv model.Vector + err = json.Unmarshal(v.Result, &vv) + qr.v = vv + + case model.ValMatrix: + var mv model.Matrix + err = json.Unmarshal(v.Result, &mv) + qr.v = mv + + default: + err = fmt.Errorf("unexpected value type %q", v.Type) + } + return err +} + +// NewAPI returns a new API for the client. +// +// It is safe to use the returned API from multiple goroutines. 
+func NewAPI(c api.Client) API { + return &httpAPI{client: apiClient{c}} +} + +type httpAPI struct { + client api.Client +} + +func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) { + u := h.client.URL(epAlertManagers, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return AlertManagersResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return AlertManagersResult{}, err + } + + var res AlertManagersResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) CleanTombstones(ctx context.Context) error { + u := h.client.URL(epCleanTombstones, nil) + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return err + } + + _, _, err = h.client.Do(ctx, req) + return err +} + +func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) { + u := h.client.URL(epConfig, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return ConfigResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return ConfigResult{}, err + } + + var res ConfigResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error { + u := h.client.URL(epDeleteSeries, nil) + q := u.Query() + + for _, m := range matches { + q.Add("match[]", m) + } + + q.Set("start", startTime.Format(time.RFC3339Nano)) + q.Set("end", endTime.Format(time.RFC3339Nano)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return err + } + + _, _, err = h.client.Do(ctx, req) + return err +} + +func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) { + u := h.client.URL(epFlags, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return FlagsResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return FlagsResult{}, err + } + + var res FlagsResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, error) { + u := h.client.URL(epLabelValues, map[string]string{"name": label}) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + var labelValues model.LabelValues + err = json.Unmarshal(body, &labelValues) + return labelValues, err +} + +func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) { + u := h.client.URL(epQuery, nil) + q := u.Query() + + q.Set("query", query) + if !ts.IsZero() { + q.Set("time", ts.Format(time.RFC3339Nano)) + } + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var qres queryResult + err = json.Unmarshal(body, &qres) + + return model.Value(qres.v), err +} + +func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) { + u := h.client.URL(epQueryRange, nil) + q := u.Query() + + var ( + start = r.Start.Format(time.RFC3339Nano) + end = r.End.Format(time.RFC3339Nano) + step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64) + ) + + q.Set("query", query) + q.Set("start", start) + q.Set("end", end) + 
q.Set("step", step) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var qres queryResult + err = json.Unmarshal(body, &qres) + + return model.Value(qres.v), err +} + +func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error) { + u := h.client.URL(epSeries, nil) + q := u.Query() + + for _, m := range matches { + q.Add("match[]", m) + } + + q.Set("start", startTime.Format(time.RFC3339Nano)) + q.Set("end", endTime.Format(time.RFC3339Nano)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var mset []model.LabelSet + err = json.Unmarshal(body, &mset) + return mset, err +} + +func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) { + u := h.client.URL(epSnapshot, nil) + q := u.Query() + + q.Set("skip_head", strconv.FormatBool(skipHead)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return SnapshotResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return SnapshotResult{}, err + } + + var res SnapshotResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) { + u := h.client.URL(epTargets, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return TargetsResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return TargetsResult{}, err + } + + var res TargetsResult + err = json.Unmarshal(body, &res) + return res, err +} + +// apiClient wraps a regular client and processes successful API responses. +// Successful also includes responses that errored at the API level. 
+type apiClient struct { + api.Client +} + +type apiResponse struct { + Status string `json:"status"` + Data json.RawMessage `json:"data"` + ErrorType ErrorType `json:"errorType"` + Error string `json:"error"` +} + +func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + resp, body, err := c.Client.Do(ctx, req) + if err != nil { + return resp, body, err + } + + code := resp.StatusCode + + if code/100 != 2 && code != statusAPIError { + return resp, body, &Error{ + Type: ErrBadResponse, + Msg: fmt.Sprintf("bad response code %d", resp.StatusCode), + } + } + + var result apiResponse + + if err = json.Unmarshal(body, &result); err != nil { + return resp, body, &Error{ + Type: ErrBadResponse, + Msg: err.Error(), + } + } + + if (code == statusAPIError) != (result.Status == "error") { + err = &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + } + } + + if code == statusAPIError && result.Status == "error" { + err = &Error{ + Type: result.ErrorType, + Msg: result.Error, + } + } + + return resp, []byte(result.Data), err +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/prometheus/v1/api_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/prometheus/v1/api_test.go new file mode 100644 index 00000000..2fe60445 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/api/prometheus/v1/api_test.go @@ -0,0 +1,693 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "reflect" + "strings" + "testing" + "time" + + "github.com/prometheus/common/model" +) + +type apiTest struct { + do func() (interface{}, error) + inErr error + inRes interface{} + + reqPath string + reqParam url.Values + reqMethod string + res interface{} + err error +} + +type apiTestClient struct { + *testing.T + curTest apiTest +} + +func (c *apiTestClient) URL(ep string, args map[string]string) *url.URL { + path := ep + for k, v := range args { + path = strings.Replace(path, ":"+k, v, -1) + } + u := &url.URL{ + Host: "test:9090", + Path: path, + } + return u +} + +func (c *apiTestClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + + test := c.curTest + + if req.URL.Path != test.reqPath { + c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path) + } + if req.Method != test.reqMethod { + c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method) + } + + b, err := json.Marshal(test.inRes) + if err != nil { + c.Fatal(err) + } + + resp := &http.Response{} + if test.inErr != nil { + resp.StatusCode = statusAPIError + } else { + resp.StatusCode = http.StatusOK + } + + return resp, b, test.inErr +} + +func TestAPIs(t *testing.T) { + + testTime := time.Now() + + client := &apiTestClient{T: t} + + promAPI := &httpAPI{ + client: client, + } + + doAlertManagers := func() func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.AlertManagers(context.Background()) + } + } + + doCleanTombstones := func() func() (interface{}, error) { + return func() (interface{}, error) { + return nil, promAPI.CleanTombstones(context.Background()) + } + } + + doConfig := func() func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Config(context.Background()) + } + } + + doDeleteSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, error) { + return func() (interface{}, error) { + return nil, promAPI.DeleteSeries(context.Background(), []string{matcher}, startTime, endTime) + } + } + + doFlags := func() func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Flags(context.Background()) + } + } + + doLabelValues := func(label string) func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.LabelValues(context.Background(), label) + } + } + + doQuery := func(q string, ts time.Time) func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Query(context.Background(), q, ts) + } + } + + doQueryRange := func(q string, rng Range) func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.QueryRange(context.Background(), q, rng) + } + } + + doSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Series(context.Background(), []string{matcher}, startTime, endTime) + } + } + + doSnapshot := func(skipHead bool) func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Snapshot(context.Background(), skipHead) + } + } + + doTargets := func() func() (interface{}, error) { + return func() (interface{}, error) { + return promAPI.Targets(context.Background()) + } + } + + queryTests := []apiTest{ + { + do: doQuery("2", testTime), + inRes: &queryResult{ + Type: model.ValScalar, + Result: &model.Scalar{ + 
Value: 2, + Timestamp: model.TimeFromUnix(testTime.Unix()), + }, + }, + + reqMethod: "GET", + reqPath: "/api/v1/query", + reqParam: url.Values{ + "query": []string{"2"}, + "time": []string{testTime.Format(time.RFC3339Nano)}, + }, + res: &model.Scalar{ + Value: 2, + Timestamp: model.TimeFromUnix(testTime.Unix()), + }, + }, + { + do: doQuery("2", testTime), + inErr: fmt.Errorf("some error"), + + reqMethod: "GET", + reqPath: "/api/v1/query", + reqParam: url.Values{ + "query": []string{"2"}, + "time": []string{testTime.Format(time.RFC3339Nano)}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doQueryRange("2", Range{ + Start: testTime.Add(-time.Minute), + End: testTime, + Step: time.Minute, + }), + inErr: fmt.Errorf("some error"), + + reqMethod: "GET", + reqPath: "/api/v1/query_range", + reqParam: url.Values{ + "query": []string{"2"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + "step": []string{time.Minute.String()}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doLabelValues("mylabel"), + inRes: []string{"val1", "val2"}, + reqMethod: "GET", + reqPath: "/api/v1/label/mylabel/values", + res: model.LabelValues{"val1", "val2"}, + }, + + { + do: doLabelValues("mylabel"), + inErr: fmt.Errorf("some error"), + reqMethod: "GET", + reqPath: "/api/v1/label/mylabel/values", + err: fmt.Errorf("some error"), + }, + + { + do: doSeries("up", testTime.Add(-time.Minute), testTime), + inRes: []map[string]string{ + { + "__name__": "up", + "job": "prometheus", + "instance": "localhost:9090"}, + }, + reqMethod: "GET", + reqPath: "/api/v1/series", + reqParam: url.Values{ + "match": []string{"up"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + }, + res: []model.LabelSet{ + model.LabelSet{ + "__name__": "up", + "job": "prometheus", + "instance": "localhost:9090", + }, + }, + }, + + { + do: doSeries("up", testTime.Add(-time.Minute), testTime), + inErr: fmt.Errorf("some error"), + reqMethod: "GET", + reqPath: "/api/v1/series", + reqParam: url.Values{ + "match": []string{"up"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doSnapshot(true), + inRes: map[string]string{ + "name": "20171210T211224Z-2be650b6d019eb54", + }, + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/snapshot", + reqParam: url.Values{ + "skip_head": []string{"true"}, + }, + res: SnapshotResult{ + Name: "20171210T211224Z-2be650b6d019eb54", + }, + }, + + { + do: doSnapshot(true), + inErr: fmt.Errorf("some error"), + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/snapshot", + err: fmt.Errorf("some error"), + }, + + { + do: doCleanTombstones(), + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/clean_tombstones", + }, + + { + do: doCleanTombstones(), + inErr: fmt.Errorf("some error"), + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/clean_tombstones", + err: fmt.Errorf("some error"), + }, + + { + do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime), + inRes: []map[string]string{ + { + "__name__": "up", + "job": "prometheus", + "instance": "localhost:9090"}, + }, + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/delete_series", + reqParam: url.Values{ + "match": []string{"up"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + }, + }, + + { 
+ do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime), + inErr: fmt.Errorf("some error"), + reqMethod: "POST", + reqPath: "/api/v1/admin/tsdb/delete_series", + reqParam: url.Values{ + "match": []string{"up"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doConfig(), + reqMethod: "GET", + reqPath: "/api/v1/status/config", + inRes: map[string]string{ + "yaml": "", + }, + res: ConfigResult{ + YAML: "", + }, + }, + + { + do: doConfig(), + reqMethod: "GET", + reqPath: "/api/v1/status/config", + inErr: fmt.Errorf("some error"), + err: fmt.Errorf("some error"), + }, + + { + do: doFlags(), + reqMethod: "GET", + reqPath: "/api/v1/status/flags", + inRes: map[string]string{ + "alertmanager.notification-queue-capacity": "10000", + "alertmanager.timeout": "10s", + "log.level": "info", + "query.lookback-delta": "5m", + "query.max-concurrency": "20", + }, + res: FlagsResult{ + "alertmanager.notification-queue-capacity": "10000", + "alertmanager.timeout": "10s", + "log.level": "info", + "query.lookback-delta": "5m", + "query.max-concurrency": "20", + }, + }, + + { + do: doFlags(), + reqMethod: "GET", + reqPath: "/api/v1/status/flags", + inErr: fmt.Errorf("some error"), + err: fmt.Errorf("some error"), + }, + + { + do: doAlertManagers(), + reqMethod: "GET", + reqPath: "/api/v1/alertmanagers", + inRes: map[string]interface{}{ + "activeAlertManagers": []map[string]string{ + { + "url": "http://127.0.0.1:9091/api/v1/alerts", + }, + }, + "droppedAlertManagers": []map[string]string{ + { + "url": "http://127.0.0.1:9092/api/v1/alerts", + }, + }, + }, + res: AlertManagersResult{ + Active: []AlertManager{ + { + URL: "http://127.0.0.1:9091/api/v1/alerts", + }, + }, + Dropped: []AlertManager{ + { + URL: "http://127.0.0.1:9092/api/v1/alerts", + }, + }, + }, + }, + + { + do: doAlertManagers(), + reqMethod: "GET", + reqPath: "/api/v1/alertmanagers", + inErr: fmt.Errorf("some error"), + err: fmt.Errorf("some error"), + }, + + { + do: doTargets(), + reqMethod: "GET", + reqPath: "/api/v1/targets", + inRes: map[string]interface{}{ + "activeTargets": []map[string]interface{}{ + { + "discoveredLabels": map[string]string{ + "__address__": "127.0.0.1:9090", + "__metrics_path__": "/metrics", + "__scheme__": "http", + "job": "prometheus", + }, + "labels": map[string]string{ + "instance": "127.0.0.1:9090", + "job": "prometheus", + }, + "scrapeUrl": "http://127.0.0.1:9090", + "lastError": "error while scraping target", + "lastScrape": testTime.UTC().Format(time.RFC3339Nano), + "health": "up", + }, + }, + "droppedTargets": []map[string]interface{}{ + { + "discoveredLabels": map[string]string{ + "__address__": "127.0.0.1:9100", + "__metrics_path__": "/metrics", + "__scheme__": "http", + "job": "node", + }, + }, + }, + }, + res: TargetsResult{ + Active: []ActiveTarget{ + { + DiscoveredLabels: model.LabelSet{ + "__address__": "127.0.0.1:9090", + "__metrics_path__": "/metrics", + "__scheme__": "http", + "job": "prometheus", + }, + Labels: model.LabelSet{ + "instance": "127.0.0.1:9090", + "job": "prometheus", + }, + ScrapeURL: "http://127.0.0.1:9090", + LastError: "error while scraping target", + LastScrape: testTime.UTC(), + Health: HealthGood, + }, + }, + Dropped: []DroppedTarget{ + { + DiscoveredLabels: model.LabelSet{ + "__address__": "127.0.0.1:9100", + "__metrics_path__": "/metrics", + "__scheme__": "http", + "job": "node", + }, + }, + }, + }, + }, + + { + do: doTargets(), + reqMethod: 
"GET", + reqPath: "/api/v1/targets", + inErr: fmt.Errorf("some error"), + err: fmt.Errorf("some error"), + }, + } + + var tests []apiTest + tests = append(tests, queryTests...) + + for _, test := range tests { + client.curTest = test + + res, err := test.do() + + if test.err != nil { + if err == nil { + t.Errorf("expected error %q but got none", test.err) + continue + } + if err.Error() != test.err.Error() { + t.Errorf("unexpected error: want %s, got %s", test.err, err) + } + continue + } + if err != nil { + t.Errorf("unexpected error: %s", err) + continue + } + + if !reflect.DeepEqual(res, test.res) { + t.Errorf("unexpected result: want %v, got %v", test.res, res) + } + } +} + +type testClient struct { + *testing.T + + ch chan apiClientTest + req *http.Request +} + +type apiClientTest struct { + code int + response interface{} + expected string + err *Error +} + +func (c *testClient) URL(ep string, args map[string]string) *url.URL { + return nil +} + +func (c *testClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + if ctx == nil { + c.Fatalf("context was not passed down") + } + if req != c.req { + c.Fatalf("request was not passed down") + } + + test := <-c.ch + + var b []byte + var err error + + switch v := test.response.(type) { + case string: + b = []byte(v) + default: + b, err = json.Marshal(v) + if err != nil { + c.Fatal(err) + } + } + + resp := &http.Response{ + StatusCode: test.code, + } + + return resp, b, nil +} + +func TestAPIClientDo(t *testing.T) { + tests := []apiClientTest{ + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`null`), + ErrorType: ErrBadData, + Error: "failed", + }, + err: &Error{ + Type: ErrBadData, + Msg: "failed", + }, + code: statusAPIError, + expected: `null`, + }, + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrTimeout, + Msg: "timed out", + }, + code: statusAPIError, + expected: `test`, + }, + { + response: "bad json", + err: &Error{ + Type: ErrBadResponse, + Msg: "bad response code 400", + }, + code: http.StatusBadRequest, + }, + { + response: "bad json", + err: &Error{ + Type: ErrBadResponse, + Msg: "invalid character 'b' looking for beginning of value", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "success", + Data: json.RawMessage(`"test"`), + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "success", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: http.StatusOK, + }, + } + + tc := &testClient{ + T: t, + ch: make(chan apiClientTest, 1), + req: &http.Request{}, + } + client := &apiClient{tc} + + for _, test := range tests { + + tc.ch <- test + + _, body, err := client.Do(context.Background(), tc.req) + + if test.err != nil { + if err == nil { + t.Errorf("expected error %q but got none", test.err) + continue + } + if test.err.Error() != err.Error() { + t.Errorf("unexpected error: want %q, got %q", test.err, err) + } + continue + } + if err != nil { + 
t.Errorf("unexpeceted error %s", err) + continue + } + + want, got := test.expected, string(body) + if want != got { + t.Errorf("unexpected body: want %q, got %q", want, got) + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/random/Dockerfile b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/random/Dockerfile new file mode 100644 index 00000000..32b6846e --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/random/Dockerfile @@ -0,0 +1,20 @@ +# This Dockerfile builds an image for a client_golang example. +# +# Use as (from the root for the client_golang repository): +# docker build -f examples/$name/Dockerfile -t prometheus/golang-example-$name . + +# Builder image, where we build the example. +FROM golang:1.9.0 AS builder +WORKDIR /go/src/github.com/prometheus/client_golang +COPY . . +WORKDIR /go/src/github.com/prometheus/client_golang/prometheus +RUN go get -d +WORKDIR /go/src/github.com/prometheus/client_golang/examples/random +RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w' + +# Final image. +FROM scratch +LABEL maintainer "The Prometheus Authors " +COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/random . +EXPOSE 8080 +ENTRYPOINT ["/random"] diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/random/main.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/random/main.go new file mode 100644 index 00000000..eef50d20 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/random/main.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A simple example exposing fictional RPC latencies with different types of +// random distributions (uniform, normal, and exponential) as Prometheus +// metrics. +package main + +import ( + "flag" + "log" + "math" + "math/rand" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var ( + addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") + uniformDomain = flag.Float64("uniform.domain", 0.0002, "The domain for the uniform distribution.") + normDomain = flag.Float64("normal.domain", 0.0002, "The domain for the normal distribution.") + normMean = flag.Float64("normal.mean", 0.00001, "The mean for the normal distribution.") + oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.") +) + +var ( + // Create a summary to track fictional interservice RPC latencies for three + // distinct services with different latency distributions. These services are + // differentiated via a "service" label. 
+ rpcDurations = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "rpc_durations_seconds", + Help: "RPC latency distributions.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"service"}, + ) + // The same as above, but now as a histogram, and only for the normal + // distribution. The buckets are targeted to the parameters of the + // normal distribution, with 20 buckets centered on the mean, each + // half-sigma wide. + rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "rpc_durations_histogram_seconds", + Help: "RPC latency distributions.", + Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), + }) +) + +func init() { + // Register the summary and the histogram with Prometheus's default registry. + prometheus.MustRegister(rpcDurations) + prometheus.MustRegister(rpcDurationsHistogram) +} + +func main() { + flag.Parse() + + start := time.Now() + + oscillationFactor := func() float64 { + return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod))) + } + + // Periodically record some sample latencies for the three services. + go func() { + for { + v := rand.Float64() * *uniformDomain + rpcDurations.WithLabelValues("uniform").Observe(v) + time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond) + } + }() + + go func() { + for { + v := (rand.NormFloat64() * *normDomain) + *normMean + rpcDurations.WithLabelValues("normal").Observe(v) + rpcDurationsHistogram.Observe(v) + time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond) + } + }() + + go func() { + for { + v := rand.ExpFloat64() / 1e6 + rpcDurations.WithLabelValues("exponential").Observe(v) + time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond) + } + }() + + // Expose the registered metrics via HTTP. + http.Handle("/metrics", promhttp.Handler()) + log.Fatal(http.ListenAndServe(*addr, nil)) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/simple/Dockerfile b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/simple/Dockerfile new file mode 100644 index 00000000..99b49d78 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/simple/Dockerfile @@ -0,0 +1,20 @@ +# This Dockerfile builds an image for a client_golang example. +# +# Use as (from the root for the client_golang repository): +# docker build -f examples/$name/Dockerfile -t prometheus/golang-example-$name . + +# Builder image, where we build the example. +FROM golang:1.9.0 AS builder +WORKDIR /go/src/github.com/prometheus/client_golang +COPY . . +WORKDIR /go/src/github.com/prometheus/client_golang/prometheus +RUN go get -d +WORKDIR /go/src/github.com/prometheus/client_golang/examples/simple +RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w' + +# Final image. +FROM scratch +LABEL maintainer "The Prometheus Authors " +COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/simple . 
+EXPOSE 8080 +ENTRYPOINT ["/simple"] diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/simple/main.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/simple/main.go new file mode 100644 index 00000000..1fc23249 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/examples/simple/main.go @@ -0,0 +1,31 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A minimal example of how to include Prometheus instrumentation. +package main + +import ( + "flag" + "log" + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") + +func main() { + flag.Parse() + http.Handle("/metrics", promhttp.Handler()) + log.Fatal(http.ListenAndServe(*addr, nil)) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/README.md b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/README.md new file mode 100644 index 00000000..44986bff --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/README.md @@ -0,0 +1 @@ +See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/benchmark_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/benchmark_test.go new file mode 100644 index 00000000..4a05721d --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/benchmark_test.go @@ -0,0 +1,199 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "sync" + "testing" +) + +func BenchmarkCounterWithLabelValues(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } +} + +func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + wg := sync.WaitGroup{} + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + for j := 0; j < b.N/10; j++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } + wg.Done() + }() + } + wg.Wait() +} + +func BenchmarkCounterWithMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc() + } +} + +func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + labels := Labels{"two": "zwei", "one": "eins", "three": "drei"} + for i := 0; i < b.N; i++ { + m.With(labels).Inc() + } +} + +func BenchmarkCounterNoLabels(b *testing.B) { + m := NewCounter(CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Inc() + } +} + +func BenchmarkGaugeWithLabelValues(b *testing.B) { + m := NewGaugeVec( + GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Set(3.1415) + } +} + +func BenchmarkGaugeNoLabels(b *testing.B) { + m := NewGauge(GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Set(3.1415) + } +} + +func BenchmarkSummaryWithLabelValues(b *testing.B) { + m := NewSummaryVec( + SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkSummaryNoLabels(b *testing.B) { + m := NewSummary(SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} + +func BenchmarkHistogramWithLabelValues(b *testing.B) { + m := NewHistogramVec( + HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkHistogramNoLabels(b *testing.B) { + m := NewHistogram(HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to 
benchmark it.", + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} + +func BenchmarkParallelCounter(b *testing.B) { + c := NewCounter(CounterOpts{ + Name: "benchmark_counter", + Help: "A Counter to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + c.Inc() + } + }) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/collector.go new file mode 100644 index 00000000..623d3d83 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. Blocking occurs at the expense of total performance of rendering + // all registered metrics. 
Ideally, Collector implementations support + // concurrent readers. + Collect(chan<- Metric) +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/counter.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/counter.go new file mode 100644 index 00000000..765e4550 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/counter.go @@ -0,0 +1,277 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. 
+func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair +} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + return populateMetric(CounterValue, val, c.labelPairs, out) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *metricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). 
+// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. 
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/counter_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/counter_test.go new file mode 100644 index 00000000..5062f51a --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/counter_test.go @@ -0,0 +1,212 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
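A hypothetical use of NewCounterFunc as documented above: the mirrored variable and metric name are made up, and the closure only reads an atomically updated value so it stays safe for concurrent collection and never reports a decreasing value.

package main

import (
	"log"
	"net/http"
	"sync/atomic"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// bytesServed is maintained elsewhere in this hypothetical program; the
// CounterFunc merely mirrors it at collect time.
var bytesServed uint64

func main() {
	prometheus.MustRegister(prometheus.NewCounterFunc(
		prometheus.CounterOpts{
			Name: "demo_bytes_served_total",
			Help: "Bytes served, mirrored from an existing counter variable.",
		},
		func() float64 { return float64(atomic.LoadUint64(&bytesServed)) },
	))

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}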
+ +package prometheus + +import ( + "fmt" + "math" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestCounterAdd(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }).(*counter) + counter.Inc() + if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + if expected, got := uint64(1), counter.valInt; expected != got { + t.Errorf("Expected %d, got %d.", expected, got) + } + counter.Add(42) + if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + if expected, got := uint64(43), counter.valInt; expected != got { + t.Errorf("Expected %d, got %d.", expected, got) + } + + counter.Add(24.42) + if expected, got := 24.42, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + if expected, got := uint64(43), counter.valInt; expected != got { + t.Errorf("Expected %d, got %d.", expected, got) + } + + if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got { + t.Errorf("Expected error %q, got %q.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := `label: label: counter: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func decreaseCounter(c *counter) (err error) { + defer func() { + if e := recover(); e != nil { + err = e.(error) + } + }() + c.Add(-1) + return nil +} + +func TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) { + testCases := []struct { + desc string + labels Labels + }{ + { + desc: "non utf8 label value", + labels: Labels{"a": "\xFF"}, + }, + { + desc: "not enough label values", + labels: Labels{}, + }, + { + desc: "too many label values", + labels: Labels{"a": "1", "b": "2"}, + }, + } + + for _, test := range testCases { + counterVec := NewCounterVec(CounterOpts{ + Name: "test", + }, []string{"a"}) + + labelValues := make([]string, len(test.labels)) + for _, val := range test.labels { + labelValues = append(labelValues, val) + } + + expectPanic(t, func() { + counterVec.WithLabelValues(labelValues...) 
+ }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) + expectPanic(t, func() { + counterVec.With(test.labels) + }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) + + if _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil { + t.Errorf("GetMetricWithLabelValues: expected error because: %s", test.desc) + } + if _, err := counterVec.GetMetricWith(test.labels); err == nil { + t.Errorf("GetMetricWith: expected error because: %s", test.desc) + } + } +} + +func expectPanic(t *testing.T, op func(), errorMsg string) { + defer func() { + if err := recover(); err == nil { + t.Error(errorMsg) + } + }() + + op() +} + +func TestCounterAddInf(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + }).(*counter) + + counter.Inc() + if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + if expected, got := uint64(1), counter.valInt; expected != got { + t.Errorf("Expected %d, got %d.", expected, got) + } + + counter.Add(math.Inf(1)) + if expected, got := math.Inf(1), math.Float64frombits(counter.valBits); expected != got { + t.Errorf("valBits expected %f, got %f.", expected, got) + } + if expected, got := uint64(1), counter.valInt; expected != got { + t.Errorf("valInts expected %d, got %d.", expected, got) + } + + counter.Inc() + if expected, got := math.Inf(1), math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + if expected, got := uint64(2), counter.valInt; expected != got { + t.Errorf("Expected %d, got %d.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := `counter: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func TestCounterAddLarge(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + }).(*counter) + + // large overflows the underlying type and should therefore be stored in valBits. 
+ large := float64(math.MaxUint64 + 1) + counter.Add(large) + if expected, got := large, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("valBits expected %f, got %f.", expected, got) + } + if expected, got := uint64(0), counter.valInt; expected != got { + t.Errorf("valInts expected %d, got %d.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := fmt.Sprintf("counter: ", large), m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func TestCounterAddSmall(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + }).(*counter) + small := 0.000000000001 + counter.Add(small) + if expected, got := small, math.Float64frombits(counter.valBits); expected != got { + t.Errorf("valBits expected %f, got %f.", expected, got) + } + if expected, got := uint64(0), counter.valInt; expected != got { + t.Errorf("valInts expected %d, got %d.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := fmt.Sprintf("counter: ", small), m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/desc.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/desc.go new file mode 100644 index 00000000..4a755b0f --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/desc.go @@ -0,0 +1,188 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. 
+ constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !model.IsValidMetricName(model.LabelValue(fqName)) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... + for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. 
+ sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/desc_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/desc_test.go new file mode 100644 index 00000000..2f962652 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/desc_test.go @@ -0,0 +1,17 @@ +package prometheus + +import ( + "testing" +) + +func TestNewDescInvalidLabelValues(t *testing.T) { + desc := NewDesc( + "sample_label", + "sample label", + nil, + Labels{"a": "\xFF"}, + ) + if desc.err == nil { + t.Errorf("NewDesc: expected error because: %s", desc.err) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/doc.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/doc.go new file mode 100644 index 00000000..83c3657d --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/doc.go @@ -0,0 +1,191 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. 
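To make the Desc constructor walked through above more concrete, here is a small sketch (all names and values invented) that builds a Desc with one constant and one variable label and feeds it to MustNewConstMetric, the way a custom Collector typically would.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// fqName, help, one variable label ("queue"), and one const label ("shard").
	desc := prometheus.NewDesc(
		"demo_queue_depth",
		"Current depth of a work queue.",
		[]string{"queue"},
		prometheus.Labels{"shard": "eu-1"},
	)
	// Desc's String method prints the fqName, help, const labels, and
	// variable label names, roughly as shown in the method above.
	fmt.Println(desc)

	// A Desc is usually consumed by (Must)NewConstMetric inside a Collector's
	// Collect method; the value and label value here are made up.
	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 17, "ingest")
	_ = m
}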
+// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. However, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. 
At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegisterer comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. 
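A brief sketch of the custom-registry approach described above (metric name and listen address are made up): nothing is registered implicitly, the Go runtime collector is added by hand, and the registry is exposed through promhttp.HandlerFor.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry avoids the global DefaultRegisterer; nothing is
	// registered implicitly, so the Go collector is added explicitly.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	jobsDone := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_jobs_done_total",
		Help: "Jobs completed by the demo worker.",
	})
	reg.MustRegister(jobsDone)
	jobsDone.Inc()

	// HandlerFor exposes exactly this registry (a Gatherer), nothing else.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}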
+// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// (The top-level functions in the prometheus package are deprecated.) +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. +package prometheus diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_clustermanager_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_clustermanager_test.go new file mode 100644 index 00000000..260c1b52 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_clustermanager_test.go @@ -0,0 +1,118 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import "github.com/prometheus/client_golang/prometheus" + +// ClusterManager is an example for a system that might have been built without +// Prometheus in mind. It models a central manager of jobs running in a +// cluster. To turn it into something that collects Prometheus metrics, we +// simply add the two methods required for the Collector interface. +// +// An additional challenge is that multiple instances of the ClusterManager are +// run within the same binary, each in charge of a different zone. We need to +// make use of ConstLabels to be able to register each ClusterManager instance +// with Prometheus. +type ClusterManager struct { + Zone string + OOMCountDesc *prometheus.Desc + RAMUsageDesc *prometheus.Desc + // ... many more fields +} + +// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a +// real cluster manager would have to do. Since it may actually be really +// expensive, it must only be called once per collection. This implementation, +// obviously, only returns some made-up data. +func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( + oomCountByHost map[string]int, ramUsageByHost map[string]float64, +) { + // Just example fake data. + oomCountByHost = map[string]int{ + "foo.example.org": 42, + "bar.example.org": 2001, + } + ramUsageByHost = map[string]float64{ + "foo.example.org": 6.023e23, + "bar.example.org": 3.14, + } + return +} + +// Describe simply sends the two Descs in the struct to the channel. 
+func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { + ch <- c.OOMCountDesc + ch <- c.RAMUsageDesc +} + +// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it +// creates constant metrics for each host on the fly based on the returned data. +// +// Note that Collect could be called concurrently, so we depend on +// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe. +func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { + oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() + for host, oomCount := range oomCountByHost { + ch <- prometheus.MustNewConstMetric( + c.OOMCountDesc, + prometheus.CounterValue, + float64(oomCount), + host, + ) + } + for host, ramUsage := range ramUsageByHost { + ch <- prometheus.MustNewConstMetric( + c.RAMUsageDesc, + prometheus.GaugeValue, + ramUsage, + host, + ) + } +} + +// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note +// that the zone is set as a ConstLabel. (It's different in each instance of the +// ClusterManager, but constant over the lifetime of an instance.) Then there is +// a variable label "host", since we want to partition the collected metrics by +// host. Since all Descs created in this way are consistent across instances, +// with a guaranteed distinction by the "zone" label, we can register different +// ClusterManager instances with the same registry. +func NewClusterManager(zone string) *ClusterManager { + return &ClusterManager{ + Zone: zone, + OOMCountDesc: prometheus.NewDesc( + "clustermanager_oom_crashes_total", + "Number of OOM crashes.", + []string{"host"}, + prometheus.Labels{"zone": zone}, + ), + RAMUsageDesc: prometheus.NewDesc( + "clustermanager_ram_usage_bytes", + "RAM usage as reported to the cluster manager.", + []string{"host"}, + prometheus.Labels{"zone": zone}, + ), + } +} + +func ExampleCollector() { + workerDB := NewClusterManager("db") + workerCA := NewClusterManager("ca") + + // Since we are dealing with custom Collector implementations, it might + // be a good idea to try it out with a pedantic registry. + reg := prometheus.NewPedanticRegistry() + reg.MustRegister(workerDB) + reg.MustRegister(workerCA) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_timer_complex_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_timer_complex_test.go new file mode 100644 index 00000000..c5e7de5e --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_timer_complex_test.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // apiRequestDuration tracks the duration separate for each HTTP status + // class (1xx, 2xx, ...). This creates a fair amount of time series on + // the Prometheus server. 
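Registered Collectors such as the ClusterManager above do not need an HTTP server to be exercised; the following self-contained sketch (demo metric and values invented) gathers from a pedantic registry and prints the result in the text exposition format, which is also how the collectors above could be inspected.

package main

import (
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func main() {
	reg := prometheus.NewPedanticRegistry()

	temp := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "demo_zone_temperature_celsius",
			Help: "Temperature per zone (made-up demo data).",
		},
		[]string{"zone"},
	)
	reg.MustRegister(temp)
	temp.WithLabelValues("db").Set(21.5)
	temp.WithLabelValues("ca").Set(23.0)

	// Gather calls Collect on every registered Collector and returns the
	// resulting metric families, dumped here in the text exposition format.
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
			panic(err)
		}
	}
}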
Usually, you would track the duration of + // serving HTTP request without partitioning by outcome. Do something + // like this only if needed. Also note how only status classes are + // tracked, not every single status code. The latter would create an + // even larger amount of time series. Request counters partitioned by + // status code are usually OK as each counter only creates one time + // series. Histograms are way more expensive, so partition with care and + // only where you really need separate latency tracking. Partitioning by + // status class is only an example. In concrete cases, other partitions + // might make more sense. + apiRequestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "api_request_duration_seconds", + Help: "Histogram for the request duration of the public API, partitioned by status class.", + Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5), + }, + []string{"status_class"}, + ) +) + +func handler(w http.ResponseWriter, r *http.Request) { + status := http.StatusOK + // The ObserverFunc gets called by the deferred ObserveDuration and + // decides which Histogram's Observe method is called. + timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { + switch { + case status >= 500: // Server error. + apiRequestDuration.WithLabelValues("5xx").Observe(v) + case status >= 400: // Client error. + apiRequestDuration.WithLabelValues("4xx").Observe(v) + case status >= 300: // Redirection. + apiRequestDuration.WithLabelValues("3xx").Observe(v) + case status >= 200: // Success. + apiRequestDuration.WithLabelValues("2xx").Observe(v) + default: // Informational. + apiRequestDuration.WithLabelValues("1xx").Observe(v) + } + })) + defer timer.ObserveDuration() + + // Handle the request. Set status accordingly. + // ... +} + +func ExampleTimer_complex() { + http.HandleFunc("/api", handler) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_timer_gauge_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_timer_gauge_test.go new file mode 100644 index 00000000..7184a0d1 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_timer_gauge_test.go @@ -0,0 +1,48 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "os" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // If a function is called rarely (i.e. not more often than scrapes + // happen) or ideally only once (like in a batch job), it can make sense + // to use a Gauge for timing the function call. For timing a batch job + // and pushing the result to a Pushgateway, see also the comprehensive + // example in the push package. 
+ funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "example_function_duration_seconds", + Help: "Duration of the last call of an example function.", + }) +) + +func run() error { + // The Set method of the Gauge is used to observe the duration. + timer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set)) + defer timer.ObserveDuration() + + // Do something. Return errors as encountered. The use of 'defer' above + // makes sure the function is still timed properly. + return nil +} + +func ExampleTimer_gauge() { + if err := run(); err != nil { + os.Exit(1) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_timer_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_timer_test.go new file mode 100644 index 00000000..bd86bb47 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/example_timer_test.go @@ -0,0 +1,40 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "math/rand" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "example_request_duration_seconds", + Help: "Histogram for the runtime of a simple example function.", + Buckets: prometheus.LinearBuckets(0.01, 0.01, 10), + }) +) + +func ExampleTimer() { + // timer times this example function. It uses a Histogram, but a Summary + // would also work, as both implement Observer. Check out + // https://prometheus.io/docs/practices/histograms/ for differences. + timer := prometheus.NewTimer(requestDuration) + defer timer.ObserveDuration() + + // Do something here that takes time. + time.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/examples_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/examples_test.go new file mode 100644 index 00000000..45f60650 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/examples_test.go @@ -0,0 +1,754 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
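The timer examples above rely on the bucket helpers; as a reminder of what they generate, a tiny sketch follows (boundary values are computed by the snippet itself and subject to floating-point rounding, not quoted from the library docs).

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// LinearBuckets(start, width, count): count upper bounds, each `width` apart.
	fmt.Println(prometheus.LinearBuckets(0.01, 0.01, 10))
	// -> roughly 0.01, 0.02, ..., 0.10

	// ExponentialBuckets(start, factor, count): each bound is the previous
	// one multiplied by `factor`.
	fmt.Println(prometheus.ExponentialBuckets(0.1, 1.5, 5))
	// -> roughly 0.1, 0.15, 0.225, 0.3375, 0.50625
}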
+ +package prometheus_test + +import ( + "bytes" + "fmt" + "math" + "net/http" + "runtime" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + + "github.com/golang/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleGauge() { + opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed.", + }) + prometheus.MustRegister(opsQueued) + + // 10 operations queued by the goroutine managing incoming requests. + opsQueued.Add(10) + // A worker goroutine has picked up a waiting operation. + opsQueued.Dec() + // And once more... + opsQueued.Dec() +} + +func ExampleGaugeVec() { + opsQueued := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", + }, + []string{ + // Which user has requested the operation? + "user", + // Of what type is the operation? + "type", + }, + ) + prometheus.MustRegister(opsQueued) + + // Increase a value using compact (but order-sensitive!) WithLabelValues(). + opsQueued.WithLabelValues("bob", "put").Add(4) + // Increase a value with a map using WithLabels. More verbose, but order + // doesn't matter anymore. + opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc() +} + +func ExampleGaugeFunc() { + if err := prometheus.Register(prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Subsystem: "runtime", + Name: "goroutines_count", + Help: "Number of goroutines that currently exist.", + }, + func() float64 { return float64(runtime.NumGoroutine()) }, + )); err == nil { + fmt.Println("GaugeFunc 'goroutines_count' registered.") + } + // Note that the count of goroutines is a gauge (and not a counter) as + // it can go up and down. + + // Output: + // GaugeFunc 'goroutines_count' registered. +} + +func ExampleCounter() { + pushCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", // Note: No help string... + }) + err := prometheus.Register(pushCounter) // ... so this will return an error. + if err != nil { + fmt.Println("Push counter couldn't be registered, no counting will happen:", err) + return + } + + // Try it once more, this time with a help string. + pushCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", + Help: "Number of pushes to external repository.", + }) + err = prometheus.Register(pushCounter) + if err != nil { + fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err) + return + } + + pushComplete := make(chan struct{}) + // TODO: Start a goroutine that performs repository pushes and reports + // each completion via the channel. 
+ for range pushComplete { + pushCounter.Inc() + } + // Output: + // Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string +} + +func ExampleCounterVec() { + httpReqs := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", + }, + []string{"code", "method"}, + ) + prometheus.MustRegister(httpReqs) + + httpReqs.WithLabelValues("404", "POST").Add(42) + + // If you have to access the same set of labels very frequently, it + // might be good to retrieve the metric only once and keep a handle to + // it. But beware of deletion of that metric, see below! + m := httpReqs.WithLabelValues("200", "GET") + for i := 0; i < 1000000; i++ { + m.Inc() + } + // Delete a metric from the vector. If you have previously kept a handle + // to that metric (as above), future updates via that handle will go + // unseen (even if you re-create a metric with the same label set + // later). + httpReqs.DeleteLabelValues("200", "GET") + // Same thing with the more verbose Labels syntax. + httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) +} + +func ExampleInstrumentHandler() { + // Handle the "/doc" endpoint with the standard http.FileServer handler. + // By wrapping the handler with InstrumentHandler, request count, + // request and response sizes, and request latency are automatically + // exported to Prometheus, partitioned by HTTP status code and method + // and by the handler name (here "fileserver"). + http.Handle("/doc", prometheus.InstrumentHandler( + "fileserver", http.FileServer(http.Dir("/usr/share/doc")), + )) + // The Prometheus handler still has to be registered to handle the + // "/metrics" endpoint. The handler returned by prometheus.Handler() is + // already instrumented - with "prometheus" as the handler name. In this + // example, we want the handler name to be "metrics", so we instrument + // the uninstrumented Prometheus handler ourselves. + http.Handle("/metrics", prometheus.InstrumentHandler( + "metrics", prometheus.UninstrumentedHandler(), + )) +} + +func ExampleLabelPairSorter() { + labelPairs := []*dto.LabelPair{ + {Name: proto.String("status"), Value: proto.String("404")}, + {Name: proto.String("method"), Value: proto.String("get")}, + } + + sort.Sort(prometheus.LabelPairSorter(labelPairs)) + + fmt.Println(labelPairs) + // Output: + // [name:"method" value:"get" name:"status" value:"404" ] +} + +func ExampleRegister() { + // Imagine you have a worker pool and want to count the tasks completed. + taskCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }) + // This will register fine. + if err := prometheus.Register(taskCounter); err != nil { + fmt.Println(err) + } else { + fmt.Println("taskCounter registered.") + } + // Don't forget to tell the HTTP server about the Prometheus handler. + // (In a real program, you still need to start the HTTP server...) + http.Handle("/metrics", prometheus.Handler()) + + // Now you can start workers and give every one of them a pointer to + // taskCounter and let it increment it whenever it completes a task. + taskCounter.Inc() // This has to happen somewhere in the worker code. + + // But wait, you want to see how individual workers perform. 
So you need + // a vector of counters, with one element for each worker. + taskCounterVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + + // Registering will fail because we already have a metric of that name. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + + // To fix, first unregister the old taskCounter. + if prometheus.Unregister(taskCounter) { + fmt.Println("taskCounter unregistered.") + } + + // Try registering taskCounterVec again. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Bummer! Still doesn't work. + + // Prometheus will not allow you to ever export metrics with + // inconsistent help strings or label names. After unregistering, the + // unregistered metrics will cease to show up in the /metrics HTTP + // response, but the registry still remembers that those metrics had + // been exported before. For this example, we will now choose a + // different name. (In a real program, you would obviously not export + // the obsolete metric in the first place.) + taskCounterVec = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_by_id", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Finally it worked! + + // The workers have to tell taskCounterVec their id to increment the + // right element in the metric vector. + taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42. + + // Each worker could also keep a reference to their own counter element + // around. Pick the counter at initialization time of the worker. + myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code. + myCounter.Inc() // Somewhere in the code of that worker. + + // Note that something like WithLabelValues("42", "spurious arg") would + // panic (because you have provided too many label values). If you want + // to get an error instead, use GetMetricWithLabelValues(...) instead. + notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg") + if err != nil { + fmt.Println("Worker initialization failed:", err) + } + if notMyCounter == nil { + fmt.Println("notMyCounter is nil.") + } + + // A different (and somewhat tricky) approach is to use + // ConstLabels. ConstLabels are pairs of label names and label values + // that never change. You might ask what those labels are good for (and + // rightfully so - if they never change, they could as well be part of + // the metric name). There are essentially two use-cases: The first is + // if labels are constant throughout the lifetime of a binary execution, + // but they vary over time or between different instances of a running + // binary. The second is what we have here: Each worker creates and + // registers an own Counter instance where the only difference is in the + // value of the ConstLabels. 
Those Counters can all be registered + // because the different ConstLabel values guarantee that each worker + // will increment a different Counter metric. + counterOpts := prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks", + Help: "Total number of tasks completed.", + ConstLabels: prometheus.Labels{"worker_id": "42"}, + } + taskCounterForWorker42 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker42); err != nil { + fmt.Println("taskCounterVForWorker42 not registered:", err) + } else { + fmt.Println("taskCounterForWorker42 registered.") + } + // Obviously, in real code, taskCounterForWorker42 would be a member + // variable of a worker struct, and the "42" would be retrieved with a + // GetId() method or something. The Counter would be created and + // registered in the initialization code of the worker. + + // For the creation of the next Counter, we can recycle + // counterOpts. Just change the ConstLabels. + counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"} + taskCounterForWorker2001 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker2001); err != nil { + fmt.Println("taskCounterVForWorker2001 not registered:", err) + } else { + fmt.Println("taskCounterForWorker2001 registered.") + } + + taskCounterForWorker2001.Inc() + taskCounterForWorker42.Inc() + taskCounterForWorker2001.Inc() + + // Yet another approach would be to turn the workers themselves into + // Collectors and register them. See the Collector example for details. + + // Output: + // taskCounter registered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounter unregistered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounterVec registered. + // Worker initialization failed: inconsistent label cardinality + // notMyCounter is nil. + // taskCounterForWorker42 registered. + // taskCounterForWorker2001 registered. +} + +func ExampleSummary() { + temps := prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
+ metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > +} + +func ExampleSummaryVec() { + temps := prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"species"}, + ) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10) + } + + // Create a Summary without any observations. + temps.WithLabelValues("leiopelma-hochstetteri") + + // Just for demonstration, let's check the state of the summary vector + // by registering it with a custom registry and then let it collect the + // metrics. + reg := prometheus.NewRegistry() + reg.MustRegister(temps) + + metricFamilies, err := reg.Gather() + if err != nil || len(metricFamilies) != 1 { + panic("unexpected behavior of custom test registry") + } + fmt.Println(proto.MarshalTextString(metricFamilies[0])) + + // Output: + // name: "pond_temperature_celsius" + // help: "The temperature of the frog pond." + // type: SUMMARY + // metric: < + // label: < + // name: "species" + // value: "leiopelma-hochstetteri" + // > + // summary: < + // sample_count: 0 + // sample_sum: 0 + // quantile: < + // quantile: 0.5 + // value: nan + // > + // quantile: < + // quantile: 0.9 + // value: nan + // > + // quantile: < + // quantile: 0.99 + // value: nan + // > + // > + // > + // metric: < + // label: < + // name: "species" + // value: "lithobates-catesbeianus" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 31956.100000000017 + // quantile: < + // quantile: 0.5 + // value: 32.4 + // > + // quantile: < + // quantile: 0.9 + // value: 41.4 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // > + // metric: < + // label: < + // name: "species" + // value: "litoria-caerulea" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // > +} + +func ExampleNewConstSummary() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A summary of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant summary from values we got from a 3rd party telemetry system. + s := prometheus.MustNewConstSummary( + desc, + 4711, 403.34, + map[float64]float64{0.5: 42.3, 0.9: 323.3}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
+ metric := &dto.Metric{} + s.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // summary: < + // sample_count: 4711 + // sample_sum: 403.34 + // quantile: < + // quantile: 0.5 + // value: 42.3 + // > + // quantile: < + // quantile: 0.9 + // value: 323.3 + // > + // > +} + +func ExampleHistogram() { + temps := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide. + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // histogram: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // bucket: < + // cumulative_count: 192 + // upper_bound: 20 + // > + // bucket: < + // cumulative_count: 366 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 501 + // upper_bound: 30 + // > + // bucket: < + // cumulative_count: 638 + // upper_bound: 35 + // > + // bucket: < + // cumulative_count: 816 + // upper_bound: 40 + // > + // > +} + +func ExampleNewConstHistogram() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A histogram of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant histogram from values we got from a 3rd party telemetry system. + h := prometheus.MustNewConstHistogram( + desc, + 4711, 403.34, + map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + h.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // histogram: < + // sample_count: 4711 + // sample_sum: 403.34 + // bucket: < + // cumulative_count: 121 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 2403 + // upper_bound: 50 + // > + // bucket: < + // cumulative_count: 3221 + // upper_bound: 100 + // > + // bucket: < + // cumulative_count: 4233 + // upper_bound: 200 + // > + // > +} + +func ExampleAlreadyRegisteredError() { + reqCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "requests_total", + Help: "The total number of requests served.", + }) + if err := prometheus.Register(reqCounter); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + // A counter for that metric has been registered before. + // Use the old counter from now on. + reqCounter = are.ExistingCollector.(prometheus.Counter) + } else { + // Something else went wrong! 
+ panic(err) + } + } + reqCounter.Inc() +} + +func ExampleGatherers() { + reg := prometheus.NewRegistry() + temp := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "temperature_kelvin", + Help: "Temperature in Kelvin.", + }, + []string{"location"}, + ) + reg.MustRegister(temp) + temp.WithLabelValues("outside").Set(273.14) + temp.WithLabelValues("inside").Set(298.44) + + var parser expfmt.TextParser + + text := ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. +temperature_kelvin{location="somewhere else"} 4.5 +` + + parseText := func() ([]*dto.MetricFamily, error) { + parsed, err := parser.TextToMetricFamilies(strings.NewReader(text)) + if err != nil { + return nil, err + } + var result []*dto.MetricFamily + for _, mf := range parsed { + result = append(result, mf) + } + return result, nil + } + + gatherers := prometheus.Gatherers{ + reg, + prometheus.GathererFunc(parseText), + } + + gathering, err := gatherers.Gather() + if err != nil { + fmt.Println(err) + } + + out := &bytes.Buffer{} + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + fmt.Println("----------") + + // Note how the temperature_kelvin metric family has been merged from + // different sources. Now try + text = ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. +# Duplicate metric: +temperature_kelvin{location="outside"} 265.3 + # Wrong labels: +temperature_kelvin 4.5 +` + + gathering, err = gatherers.Gather() + if err != nil { + fmt.Println(err) + } + // Note that still as many metrics as possible are returned: + out.Reset() + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + + // Output: + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. + // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 + // temperature_kelvin{location="somewhere else"} 4.5 + // ---------- + // 2 error(s) occurred: + // * collected metric temperature_kelvin label: gauge: was collected before with the same name and label values + // * collected metric temperature_kelvin gauge: has label dimensions inconsistent with previously collected metrics in the same metric family + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. 
+ // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/expvar_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/expvar_collector.go new file mode 100644 index 00000000..18a99d5f --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "encoding/json" + "expvar" +) + +type expvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated expvar Collector that still has +// to be registered with a Prometheus registry. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. 
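+// It sends each Desc from the exports map exactly once, which is what lets a
+// Registry check the collector for duplicate or inconsistent descriptors at
+// registration time, before Collect is ever called.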
+func (e *expvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *expvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) + } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/expvar_collector_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/expvar_collector_test.go new file mode 100644 index 00000000..6bcd9b69 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/expvar_collector_test.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "expvar" + "fmt" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleNewExpvarCollector() { + expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ + "memstats": prometheus.NewDesc( + "expvar_memstats", + "All numeric memstats as one metric family. Not a good role-model, actually... ;-)", + []string{"type"}, nil, + ), + "lone-int": prometheus.NewDesc( + "expvar_lone_int", + "Just an expvar int as an example.", + nil, nil, + ), + "http-request-map": prometheus.NewDesc( + "expvar_http_request_total", + "How many http requests processed, partitioned by status code and http method.", + []string{"code", "method"}, nil, + ), + }) + prometheus.MustRegister(expvarCollector) + + // The Prometheus part is done here. But to show that this example is + // doing anything, we have to manually export something via expvar. In + // real-life use-cases, some library would already have exported via + // expvar what we want to re-export as Prometheus metrics. 
+ expvar.NewInt("lone-int").Set(42) + expvarMap := expvar.NewMap("http-request-map") + var ( + expvarMap1, expvarMap2 expvar.Map + expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int + ) + expvarMap1.Init() + expvarMap2.Init() + expvarInt11.Set(3) + expvarInt12.Set(13) + expvarInt21.Set(11) + expvarInt22.Set(212) + expvarMap1.Set("POST", &expvarInt11) + expvarMap1.Set("GET", &expvarInt12) + expvarMap2.Set("POST", &expvarInt21) + expvarMap2.Set("GET", &expvarInt22) + expvarMap.Set("404", &expvarMap1) + expvarMap.Set("200", &expvarMap2) + // Results in the following expvar map: + // "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}} + + // Let's see what the scrape would yield, but exclude the memstats metrics. + metricStrings := []string{} + metric := dto.Metric{} + metricChan := make(chan prometheus.Metric) + go func() { + expvarCollector.Collect(metricChan) + close(metricChan) + }() + for m := range metricChan { + if !strings.Contains(m.Desc().String(), "expvar_memstats") { + metric.Reset() + m.Write(&metric) + metricStrings = append(metricStrings, metric.String()) + } + } + sort.Strings(metricStrings) + for _, s := range metricStrings { + fmt.Println(strings.TrimRight(s, " ")) + } + // Output: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // untyped: +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/fnv.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/fnv.go new file mode 100644 index 00000000..e3b67df8 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/fnv.go @@ -0,0 +1,29 @@ +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/gauge.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/gauge.go new file mode 100644 index 00000000..17c72d7e --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/gauge.go @@ -0,0 +1,286 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. + Inc() + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. + Dec() + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. 
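+//
+// A minimal usage sketch (the metric name and label values below are purely
+// illustrative):
+//
+//	queued := prometheus.NewGaugeVec(
+//		prometheus.GaugeOpts{
+//			Name: "ops_queued",
+//			Help: "Number of operations waiting to be processed.",
+//		},
+//		[]string{"user", "type"},
+//	)
+//	prometheus.MustRegister(queued)
+//	queued.WithLabelValues("alice", "delete").Inc()
+//	queued.With(prometheus.Labels{"user": "bob", "type": "put"}).Add(4)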
+type GaugeVec struct { + *metricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) 
+ if err != nil { + panic(err) + } + return g +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/gauge_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/gauge_test.go new file mode 100644 index 00000000..a2e3c141 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/gauge_test.go @@ -0,0 +1,202 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "math/rand" + "sync" + "testing" + "testing/quick" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func listenGaugeStream(vals, result chan float64, done chan struct{}) { + var sum float64 +outer: + for { + select { + case <-done: + close(vals) + for v := range vals { + sum += v + } + break outer + case v := <-vals: + sum += v + } + } + result <- sum + close(result) +} + +func TestGaugeConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStream := make(chan float64, mutations*concLevel) + result := make(chan float64) + done := make(chan struct{}) + + go listenGaugeStream(sStream, result, done) + go func() { + end.Wait() + close(done) + }() + + gge := NewGauge(GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sStream <- v + gge.Add(v) + } + end.Done() + }(vals) + } + start.Done() + + if expected, got := <-result, math.Float64frombits(gge.(*gauge).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. %f, got %f", expected, got) + return false + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeVecConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + vecLength := int(n%5 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStreams := make([]chan float64, vecLength) + results := make([]chan float64, vecLength) + done := make(chan struct{}) + + for i := 0; i < vecLength; i++ { + sStreams[i] = make(chan float64, mutations*concLevel) + results[i] = make(chan float64) + go listenGaugeStream(sStreams[i], results[i], done) + } + + go func() { + end.Wait() + close(done) + }() + + gge := NewGaugeVec( + GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }, + []string{"label"}, + ) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + pick := make([]int, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + pick[j] = rand.Intn(vecLength) + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sStreams[pick[i]] <- v + gge.WithLabelValues(string('A' + pick[i])).Add(v) + } + end.Done() + }(vals) + } + start.Done() + + for i := range sStreams { + if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*gauge).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. 
%f, got %f", expected, got) + return false + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeFunc(t *testing.T) { + gf := NewGaugeFunc( + GaugeOpts{ + Name: "test_name", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }, + func() float64 { return 3.1415 }, + ) + + if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } + + m := &dto.Metric{} + gf.Write(m) + + if expected, got := `label: label: gauge: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func TestGaugeSetCurrentTime(t *testing.T) { + g := NewGauge(GaugeOpts{ + Name: "test_name", + Help: "test help", + }) + g.SetToCurrentTime() + unixTime := float64(time.Now().Unix()) + + m := &dto.Metric{} + g.Write(m) + + delta := unixTime - m.GetGauge().GetValue() + // This is just a smoke test to make sure SetToCurrentTime is not + // totally off. Tests with current time involved are hard... + if math.Abs(delta) > 5 { + t.Errorf("Gauge set to current time deviates from current time by more than 5s, delta is %f seconds", delta) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/go_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/go_collector.go new file mode 100644 index 00000000..0440bd12 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/go_collector.go @@ -0,0 +1,288 @@ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This causes a stop-the-world, which is very short with Go1.9+ +// (~25µs). However, with older Go versions, the stop-the-world duration depends +// on the heap size and can be quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). 
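+//
+// A minimal registration sketch, assuming a dedicated registry rather than the
+// default one:
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(prometheus.NewGoCollector())
+//	// reg.Gather() now includes go_goroutines, go_threads,
+//	// go_gc_duration_seconds, go_info and the go_memstats_* series.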
+func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms 
*runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.goInfoDesc + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. 
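+// It reports the goroutine and OS thread counts, a GC pause-duration summary
+// built from debug.ReadGCStats, the go_info gauge, and the memstats series via
+// runtime.ReadMemStats, so every scrape pays the cost of one ReadMemStats call.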
+func (c *goCollector) Collect(ch chan<- Metric) { + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/go_collector_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/go_collector_test.go new file mode 100644 index 00000000..72264da9 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/go_collector_test.go @@ -0,0 +1,123 @@ +package prometheus + +import ( + "runtime" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func TestGoCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + old = -1 + ) + defer close(closec) + + go func() { + c.Collect(ch) + go func(c <-chan struct{}) { + <-c + }(closec) + <-waitc + c.Collect(ch) + }() + + for { + select { + case m := <-ch: + // m can be Gauge or Counter, + // currently just test the go_goroutines Gauge + // and ignore others. + if m.Desc().fqName != "go_goroutines" { + continue + } + pb := &dto.Metric{} + m.Write(pb) + if pb.GetGauge() == nil { + continue + } + + if old == -1 { + old = int(pb.GetGauge().GetValue()) + close(waitc) + continue + } + + if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 { + // TODO: This is flaky in highly concurrent situations. + t.Errorf("want 1 new goroutine, got %d", diff) + } + + // GoCollector performs three sends per call. + // On line 27 we need to receive three more sends + // to shut down cleanly. 
+ <-ch + <-ch + <-ch + return + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} + +func TestGCCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + oldGC uint64 + oldPause float64 + ) + defer close(closec) + + go func() { + c.Collect(ch) + // force GC + runtime.GC() + <-waitc + c.Collect(ch) + }() + + first := true + for { + select { + case metric := <-ch: + pb := &dto.Metric{} + metric.Write(pb) + if pb.GetSummary() == nil { + continue + } + if len(pb.GetSummary().Quantile) != 5 { + t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile)) + } + for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} { + if *pb.GetSummary().Quantile[idx].Quantile != want { + t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want) + } + } + if first { + first = false + oldGC = *pb.GetSummary().SampleCount + oldPause = *pb.GetSummary().SampleSum + close(waitc) + continue + } + if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 { + t.Errorf("want 1 new garbage collection run, got %d", diff) + } + if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 { + t.Errorf("want moar pause, got %f", diff) + } + return + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/graphite/bridge.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/graphite/bridge.go new file mode 100644 index 00000000..11533374 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/graphite/bridge.go @@ -0,0 +1,280 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package graphite provides a bridge to push Prometheus metrics to a Graphite +// server. +package graphite + +import ( + "bufio" + "errors" + "fmt" + "io" + "net" + "sort" + "time" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + "golang.org/x/net/context" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + defaultInterval = 15 * time.Second + millisecondsPerSecond = 1000 +) + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Ignore errors and try to push as many metrics to Graphite as possible. + ContinueOnError HandlerErrorHandling = iota + + // Abort the push to Graphite upon the first error encountered. + AbortOnError +) + +// Config defines the Graphite bridge config. +type Config struct { + // The url to push data to. Required. + URL string + + // The prefix for the pushed Graphite metrics. Defaults to empty string. 
+ Prefix string + + // The interval to use for pushing data to Graphite. Defaults to 15 seconds. + Interval time.Duration + + // The timeout for pushing metrics to Graphite. Defaults to 15 seconds. + Timeout time.Duration + + // The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer. + Gatherer prometheus.Gatherer + + // The logger that messages are written to. Defaults to no logging. + Logger Logger + + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided Logger + // is not nil. + ErrorHandling HandlerErrorHandling +} + +// Bridge pushes metrics to the configured Graphite server. +type Bridge struct { + url string + prefix string + interval time.Duration + timeout time.Duration + + errorHandling HandlerErrorHandling + logger Logger + + g prometheus.Gatherer +} + +// Logger is the minimal interface Bridge needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// NewBridge returns a pointer to a new Bridge struct. +func NewBridge(c *Config) (*Bridge, error) { + b := &Bridge{} + + if c.URL == "" { + return nil, errors.New("missing URL") + } + b.url = c.URL + + if c.Gatherer == nil { + b.g = prometheus.DefaultGatherer + } else { + b.g = c.Gatherer + } + + if c.Logger != nil { + b.logger = c.Logger + } + + if c.Prefix != "" { + b.prefix = c.Prefix + } + + var z time.Duration + if c.Interval == z { + b.interval = defaultInterval + } else { + b.interval = c.Interval + } + + if c.Timeout == z { + b.timeout = defaultInterval + } else { + b.timeout = c.Timeout + } + + b.errorHandling = c.ErrorHandling + + return b, nil +} + +// Run starts the event loop that pushes Prometheus metrics to Graphite at the +// configured interval. +func (b *Bridge) Run(ctx context.Context) { + ticker := time.NewTicker(b.interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + if err := b.Push(); err != nil && b.logger != nil { + b.logger.Println("error pushing to Graphite:", err) + } + case <-ctx.Done(): + return + } + } +} + +// Push pushes Prometheus metrics to the configured Graphite server. +func (b *Bridge) Push() error { + mfs, err := b.g.Gather() + if err != nil || len(mfs) == 0 { + switch b.errorHandling { + case AbortOnError: + return err + case ContinueOnError: + if b.logger != nil { + b.logger.Println("continue on error:", err) + } + default: + panic("unrecognized error handling value") + } + } + + conn, err := net.DialTimeout("tcp", b.url, b.timeout) + if err != nil { + return err + } + defer conn.Close() + + return writeMetrics(conn, mfs, b.prefix, model.Now()) +} + +func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error { + vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{ + Timestamp: now, + }, mfs...) 
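+	// ExtractSamples flattens the gathered metric families into individual
+	// samples (including the implicit _sum/_count and quantile/bucket series
+	// for summaries and histograms), each stamped with the timestamp above.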
+ if err != nil { + return err + } + + buf := bufio.NewWriter(w) + for _, s := range vec { + if err := writeSanitized(buf, prefix); err != nil { + return err + } + if err := buf.WriteByte('.'); err != nil { + return err + } + if err := writeMetric(buf, s.Metric); err != nil { + return err + } + if _, err := fmt.Fprintf(buf, " %g %d\n", s.Value, int64(s.Timestamp)/millisecondsPerSecond); err != nil { + return err + } + if err := buf.Flush(); err != nil { + return err + } + } + + return nil +} + +func writeMetric(buf *bufio.Writer, m model.Metric) error { + metricName, hasName := m[model.MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != model.MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value))) + } + } + + var err error + switch numLabels { + case 0: + if hasName { + return writeSanitized(buf, string(metricName)) + } + default: + sort.Strings(labelStrings) + if err = writeSanitized(buf, string(metricName)); err != nil { + return err + } + for _, s := range labelStrings { + if err = buf.WriteByte('.'); err != nil { + return err + } + if err = writeSanitized(buf, s); err != nil { + return err + } + } + } + return nil +} + +func writeSanitized(buf *bufio.Writer, s string) error { + prevUnderscore := false + + for _, c := range s { + c = replaceInvalidRune(c) + if c == '_' { + if prevUnderscore { + continue + } + prevUnderscore = true + } else { + prevUnderscore = false + } + if _, err := buf.WriteRune(c); err != nil { + return err + } + } + + return nil +} + +func replaceInvalidRune(c rune) rune { + if c == ' ' { + return '.' + } + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || (c >= '0' && c <= '9')) { + return '_' + } + return c +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/graphite/bridge_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/graphite/bridge_test.go new file mode 100644 index 00000000..c2b274c6 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/graphite/bridge_test.go @@ -0,0 +1,309 @@ +package graphite + +import ( + "bufio" + "bytes" + "io" + "log" + "net" + "os" + "regexp" + "testing" + "time" + + "github.com/prometheus/common/model" + "golang.org/x/net/context" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestSanitize(t *testing.T) { + testCases := []struct { + in, out string + }{ + {in: "hello", out: "hello"}, + {in: "hE/l1o", out: "hE_l1o"}, + {in: "he,*ll(.o", out: "he_ll_o"}, + {in: "hello_there%^&", out: "hello_there_"}, + } + + var buf bytes.Buffer + w := bufio.NewWriter(&buf) + + for i, tc := range testCases { + if err := writeSanitized(w, tc.in); err != nil { + t.Fatalf("write failed: %v", err) + } + if err := w.Flush(); err != nil { + t.Fatalf("flush failed: %v", err) + } + + if want, got := tc.out, buf.String(); want != got { + t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want) + } + + buf.Reset() + } +} + +func TestWriteSummary(t *testing.T) { + sumVec := prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"labelname"}, + ) + + sumVec.WithLabelValues("val1").Observe(float64(10)) + 
sumVec.WithLabelValues("val1").Observe(float64(20)) + sumVec.WithLabelValues("val1").Observe(float64(30)) + sumVec.WithLabelValues("val2").Observe(float64(20)) + sumVec.WithLabelValues("val2").Observe(float64(30)) + sumVec.WithLabelValues("val2").Observe(float64(40)) + + reg := prometheus.NewRegistry() + reg.MustRegister(sumVec) + + mfs, err := reg.Gather() + if err != nil { + t.Fatalf("error: %v", err) + } + + now := model.Time(1477043083) + var buf bytes.Buffer + err = writeMetrics(&buf, mfs, "prefix", now) + if err != nil { + t.Fatalf("error: %v", err) + } + + want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043 +prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043 +prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043 +prefix.name_sum.constname.constvalue.labelname.val1 60 1477043 +prefix.name_count.constname.constvalue.labelname.val1 3 1477043 +prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043 +prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043 +prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043 +prefix.name_sum.constname.constvalue.labelname.val2 90 1477043 +prefix.name_count.constname.constvalue.labelname.val2 3 1477043 +` + + if got := buf.String(); want != got { + t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) + } +} + +func TestWriteHistogram(t *testing.T) { + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + Buckets: []float64{0.01, 0.02, 0.05, 0.1}, + }, + []string{"labelname"}, + ) + + histVec.WithLabelValues("val1").Observe(float64(10)) + histVec.WithLabelValues("val1").Observe(float64(20)) + histVec.WithLabelValues("val1").Observe(float64(30)) + histVec.WithLabelValues("val2").Observe(float64(20)) + histVec.WithLabelValues("val2").Observe(float64(30)) + histVec.WithLabelValues("val2").Observe(float64(40)) + + reg := prometheus.NewRegistry() + reg.MustRegister(histVec) + + mfs, err := reg.Gather() + if err != nil { + t.Fatalf("error: %v", err) + } + + now := model.Time(1477043083) + var buf bytes.Buffer + err = writeMetrics(&buf, mfs, "prefix", now) + if err != nil { + t.Fatalf("error: %v", err) + } + + want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043 +prefix.name_sum.constname.constvalue.labelname.val1 60 1477043 +prefix.name_count.constname.constvalue.labelname.val1 3 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043 +prefix.name_sum.constname.constvalue.labelname.val2 90 1477043 +prefix.name_count.constname.constvalue.labelname.val2 3 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043 +` + if got := buf.String(); want != got { + t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) + } +} + +func TestToReader(t *testing.T) { + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + 
ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + + reg := prometheus.NewRegistry() + reg.MustRegister(cntVec) + + want := `prefix.name.constname.constvalue.labelname.val1 1 1477043 +prefix.name.constname.constvalue.labelname.val2 1 1477043 +` + mfs, err := reg.Gather() + if err != nil { + t.Fatalf("error: %v", err) + } + + now := model.Time(1477043083) + var buf bytes.Buffer + err = writeMetrics(&buf, mfs, "prefix", now) + if err != nil { + t.Fatalf("error: %v", err) + } + + if got := buf.String(); want != got { + t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) + } +} + +func TestPush(t *testing.T) { + reg := prometheus.NewRegistry() + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + reg.MustRegister(cntVec) + + host := "localhost" + port := ":56789" + b, err := NewBridge(&Config{ + URL: host + port, + Gatherer: reg, + Prefix: "prefix", + }) + if err != nil { + t.Fatalf("error creating bridge: %v", err) + } + + nmg, err := newMockGraphite(port) + if err != nil { + t.Fatalf("error creating mock graphite: %v", err) + } + defer nmg.Close() + + err = b.Push() + if err != nil { + t.Fatalf("error pushing: %v", err) + } + + wants := []string{ + "prefix.name.constname.constvalue.labelname.val1 1", + "prefix.name.constname.constvalue.labelname.val2 1", + } + + select { + case got := <-nmg.readc: + for _, want := range wants { + matched, err := regexp.MatchString(want, got) + if err != nil { + t.Fatalf("error pushing: %v", err) + } + if !matched { + t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got) + } + } + return + case err := <-nmg.errc: + t.Fatalf("error reading push: %v", err) + case <-time.After(50 * time.Millisecond): + t.Fatalf("no result from graphite server") + } +} + +func newMockGraphite(port string) (*mockGraphite, error) { + readc := make(chan string) + errc := make(chan error) + ln, err := net.Listen("tcp", port) + if err != nil { + return nil, err + } + + go func() { + conn, err := ln.Accept() + if err != nil { + errc <- err + } + var b bytes.Buffer + io.Copy(&b, conn) + readc <- b.String() + }() + + return &mockGraphite{ + readc: readc, + errc: errc, + Listener: ln, + }, nil +} + +type mockGraphite struct { + readc chan string + errc chan error + + net.Listener +} + +func ExampleBridge() { + b, err := NewBridge(&Config{ + URL: "graphite.example.org:3099", + Gatherer: prometheus.DefaultGatherer, + Prefix: "prefix", + Interval: 15 * time.Second, + Timeout: 10 * time.Second, + ErrorHandling: AbortOnError, + Logger: log.New(os.Stdout, "graphite bridge: ", log.Lshortfile), + }) + if err != nil { + panic(err) + } + + go func() { + // Start something in a goroutine that uses metrics. + }() + + // Push initial metrics to Graphite. Fail fast if the push fails. + if err := b.Push(); err != nil { + panic(err) + } + + // Create a Context to control stopping the Run() loop that pushes + // metrics to Graphite. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start pushing metrics to Graphite in the Run() loop. 
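+	// Run blocks until the Context is cancelled, so in a real program the
+	// deferred cancel above would typically be wired to shutdown or a signal
+	// handler to stop the push loop.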
+ b.Run(ctx) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/histogram.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/histogram.go new file mode 100644 index 00000000..331783a7 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/histogram.go @@ -0,0 +1,505 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. 
The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. 
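+//
+// A minimal usage sketch (metric name and bucket layout below are purely
+// illustrative):
+//
+//	reqDur := NewHistogram(HistogramOpts{
+//		Name:    "example_request_duration_seconds",
+//		Help:    "Example request latencies in seconds.",
+//		Buckets: ExponentialBuckets(0.005, 2, 5), // 0.005, 0.01, 0.02, 0.04, 0.08
+//	})
+//	reqDur.Observe(0.042)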
+func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + selfCollector + // Note that there is no mutex required. + + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. 
This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *metricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. 
Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +// With works as GetMetricWith but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. 
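+//
+// A short sketch of the typical pattern inside a custom Collector's Collect
+// method (desc, the numbers, and ch are illustrative placeholders):
+//
+//	h, err := NewConstHistogram(
+//		desc,
+//		4711,   // observation count
+//		403.42, // sum of observations
+//		map[float64]uint64{25: 121, 50: 2403, 100: 4500},
+//	)
+//	if err == nil {
+//		ch <- h
+//	}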
+func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/histogram_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/histogram_test.go new file mode 100644 index 00000000..5a20f4b6 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/histogram_test.go @@ -0,0 +1,348 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "math/rand" + "reflect" + "sort" + "sync" + "testing" + "testing/quick" + + dto "github.com/prometheus/client_model/go" +) + +func benchmarkHistogramObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramObserve1(b *testing.B) { + benchmarkHistogramObserve(1, b) +} + +func BenchmarkHistogramObserve2(b *testing.B) { + benchmarkHistogramObserve(2, b) +} + +func BenchmarkHistogramObserve4(b *testing.B) { + benchmarkHistogramObserve(4, b) +} + +func BenchmarkHistogramObserve8(b *testing.B) { + benchmarkHistogramObserve(8, b) +} + +func benchmarkHistogramWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramWrite1(b *testing.B) { + benchmarkHistogramWrite(1, b) +} + +func BenchmarkHistogramWrite2(b *testing.B) { + benchmarkHistogramWrite(2, b) +} + +func BenchmarkHistogramWrite4(b *testing.B) { + benchmarkHistogramWrite(4, b) +} + +func BenchmarkHistogramWrite8(b *testing.B) { + benchmarkHistogramWrite(8, b) +} + +func TestHistogramNonMonotonicBuckets(t *testing.T) { + testCases := map[string][]float64{ + "not strictly monotonic": {1, 2, 2, 3}, + "not monotonic at all": {1, 2, 4, 3, 5}, + "have +Inf in the middle": {1, 2, math.Inf(+1), 3}, + } + for name, buckets := range testCases { + func() { + defer func() { + if r := recover(); r == nil { + t.Errorf("Buckets %v are %s but NewHistogram did not panic.", buckets, name) + } + }() + _ = NewHistogram(HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: buckets, + }) + }() + } +} + +// Intentionally adding +Inf here to test if that case is handled correctly. +// Also, getCumulativeCounts depends on it. 
+var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)} + +func TestHistogramConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewHistogram(HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: testBuckets, + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Histogram.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + for i, wantBound := range testBuckets { + if i == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. + } + if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestHistogramVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + his := NewHistogramVec( + HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}, + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + his.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := his.WithLabelValues(string('A' + i)) + s.(Histogram).Write(m) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + if got, 
want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars[i]) + + for j, wantBound := range testBuckets { + if j == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. + } + if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func getCumulativeCounts(vars []float64) []uint64 { + counts := make([]uint64, len(testBuckets)) + for _, v := range vars { + for i := len(testBuckets) - 1; i >= 0; i-- { + if v > testBuckets[i] { + break + } + counts[i]++ + } + } + return counts +} + +func TestBuckets(t *testing.T) { + got := LinearBuckets(-15, 5, 6) + want := []float64{-15, -10, -5, 0, 5, 10} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } + + got = ExponentialBuckets(100, 1.2, 3) + want = []float64{100, 120, 144} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/http.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/http.go new file mode 100644 index 00000000..4d08154b --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/http.go @@ -0,0 +1,512 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" +) + +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). 
+// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using +// promhttp.InstrumentedHandler instead. +func Handler() http.Handler { + return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.Handler instead. See there for further documentation. +func UninstrumentedHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := DefaultGatherer.Gather() + if err != nil { + http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + }) +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +// +// Deprecated: InstrumentHandler has several issues. Use the tooling provided in +// package promhttp instead. The issues are the following: +// +// - It uses Summaries rather than Histograms. Summaries are not useful if +// aggregation across multiple instances is required. +// +// - It uses microseconds as unit, which is deprecated and should be replaced by +// seconds. +// +// - The size of the request is calculated in a separate goroutine. 
Since this +// calculator requires access to the request header, it creates a race with +// any writes to the header performed during request handling. +// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. +// +// - It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. Use the tooling provided in package promhttp instead. 
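+//
+// With promhttp, roughly the same instrumentation is obtained by chaining its
+// middleware, e.g. (the collector and handler variables are illustrative):
+//
+//	chain := promhttp.InstrumentHandlerDuration(durationObserverVec,
+//		promhttp.InstrumentHandlerCounter(requestCounterVec, apiHandler))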
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + if err := Register(reqCnt); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + panic(err) + } + } + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + if err := Register(reqDur); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + if err := Register(reqSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + if err := Register(resSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + resSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := computeApproximateRequestSize(r) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + reqCnt.WithLabelValues(method, code).Inc() + reqDur.Observe(elapsed) + resSz.Observe(float64(delegate.written)) + reqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request) <-chan int { + // Get URL length in current go routine for avoiding a race condition. + // HandlerFunc that runs in parallel may modify the URL. + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + out := make(chan int, 1) + + go func() { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
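+		// N.B. ContentLength is -1 when the length of the body is
+		// unknown; in that case the body is not counted at all.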
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s + close(out) + }() + + return out +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/http_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/http_test.go new file mode 100644 index 00000000..dd89ccf1 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/http_test.go @@ -0,0 +1,164 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +type respBody string + +func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTeapot) + w.Write([]byte(b)) +} + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +func TestInstrumentHandler(t *testing.T) { + defer func(n nower) { + now = n.(nower) + }(now) + + instant := time.Now() + end := instant.Add(30 * time.Second) + now = nowSeries(instant, end) + body := respBody("Howdy there!") + + hndlr := InstrumentHandler("test-handler", body) + + opts := SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": "test-handler"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + } + + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + err := Register(reqCnt) + if err == nil { + t.Fatal("expected reqCnt to be registered already") + } + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + t.Fatal("unexpected registration error:", err) + } + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + err = Register(reqDur) + if err == nil { + t.Fatal("expected reqDur to be registered already") + } + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + t.Fatal("unexpected registration error:", err) + } + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + err = Register(reqSz) + if err == nil { + t.Fatal("expected reqSz to be registered already") + } + if _, ok := err.(AlreadyRegisteredError); !ok { + t.Fatal("unexpected registration error:", err) + } + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + err = Register(resSz) + if err == nil { + t.Fatal("expected resSz to be registered already") + } + if _, ok := err.(AlreadyRegisteredError); !ok { + t.Fatal("unexpected registration error:", err) + } + + reqCnt.Reset() + + resp := httptest.NewRecorder() + req := &http.Request{ + Method: "GET", + } + + hndlr.ServeHTTP(resp, req) + + if resp.Code != http.StatusTeapot { + t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code) + } + if resp.Body.String() != "Howdy there!" 
{ + t.Fatalf("expected body %s, got %s", "Howdy there!", resp.Body.String()) + } + + out := &dto.Metric{} + reqDur.Write(out) + if want, got := "test-handler", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqDur, got %q", want, got) + } + if want, got := uint64(1), out.Summary.GetSampleCount(); want != got { + t.Errorf("want sample count %d in reqDur, got %d", want, got) + } + + out.Reset() + if want, got := 1, len(reqCnt.metricMap.metrics); want != got { + t.Errorf("want %d children in reqCnt, got %d", want, got) + } + cnt, err := reqCnt.GetMetricWithLabelValues("get", "418") + if err != nil { + t.Fatal(err) + } + cnt.Write(out) + if want, got := "418", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "test-handler", out.Label[1].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "get", out.Label[2].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if out.Counter == nil { + t.Fatal("expected non-nil counter in reqCnt") + } + if want, got := 1., out.Counter.GetValue(); want != got { + t.Errorf("want reqCnt of %f, got %f", want, got) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/labels.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/labels.go new file mode 100644 index 00000000..2502e373 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/labels.go @@ -0,0 +1,57 @@ +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. 
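+// For example, the internally used "__name__" label falls under this prefix.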
+const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/metric.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/metric.go new file mode 100644 index 00000000..76035bca --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/metric.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. (Implementers may find + // LabelPairSorter useful for that.) Callers of Write should still make + // sure of sorting if they depend on it. + Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. 
The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. +type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. 
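+//
+// A sketch of that pattern (myCollector, readValue, and c.desc are
+// illustrative placeholders):
+//
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		v, err := c.readValue()
+//		if err != nil {
+//			ch <- NewInvalidMetric(c.desc, err)
+//			return
+//		}
+//		ch <- MustNewConstMetric(c.desc, GaugeValue, v)
+//	}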
+func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/metric_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/metric_test.go new file mode 100644 index 00000000..7145f5e5 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/metric_test.go @@ -0,0 +1,35 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "testing" + +func TestBuildFQName(t *testing.T) { + scenarios := []struct{ namespace, subsystem, name, result string }{ + {"a", "b", "c", "a_b_c"}, + {"", "b", "c", "b_c"}, + {"a", "", "c", "a_c"}, + {"", "", "c", "c"}, + {"a", "b", "", ""}, + {"a", "", "", ""}, + {"", "b", "", ""}, + {" ", "", "", ""}, + } + + for i, s := range scenarios { + if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got { + t.Errorf("%d. want %s, got %s", i, want, got) + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/observer.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/observer.go new file mode 100644 index 00000000..5806cd09 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/observer.go @@ -0,0 +1,52 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). 
It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/process_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/process_collector.go new file mode 100644 index 00000000..b80adc6e --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/process_collector.go @@ -0,0 +1,139 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, rss *Desc + startTime *Desc +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time for the given process ID under the given namespace. +// +// Currently, the collector depends on a Linux-style proc filesystem and +// therefore only exports metrics for Linux. +func NewProcessCollector(pid int, namespace string) Collector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn works like NewProcessCollector but the process ID is +// determined on each collect anew by calling the given pidFn function. +func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) Collector { + ns := "" + if len(namespace) > 0 { + ns = namespace + "_" + } + + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. 
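+// A Registry calls it once at registration time, while Collect below is
+// called on every gather.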
+func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } + + if limits, err := p.NewLimits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/process_collector_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/process_collector_test.go new file mode 100644 index 00000000..d59ad775 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/process_collector_test.go @@ -0,0 +1,60 @@ +// +build linux + +package prometheus + +import ( + "bytes" + "os" + "regexp" + "testing" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/procfs" +) + +func TestProcessCollector(t *testing.T) { + if _, err := procfs.Self(); err != nil { + t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) + } + + registry := NewRegistry() + if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil { + t.Fatal(err) + } + if err := registry.Register(NewProcessCollectorPIDFn( + func() (int, error) { return os.Getpid(), nil }, "foobar"), + ); err != nil { + t.Fatal(err) + } + + mfs, err := registry.Gather() + if err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil { + t.Fatal(err) + } + } + + for _, re := range []*regexp.Regexp{ + regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"), + regexp.MustCompile("\nprocess_max_fds [1-9]"), + regexp.MustCompile("\nprocess_open_fds [1-9]"), + regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"), + regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"), + regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"), + regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"), + regexp.MustCompile("\nfoobar_process_max_fds [1-9]"), + regexp.MustCompile("\nfoobar_process_open_fds [1-9]"), + regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"), + regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"), + } { + if !re.Match(buf.Bytes()) { + t.Errorf("want body to match %s\n%s", re, buf.String()) + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promauto/auto.go 
b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promauto/auto.go new file mode 100644 index 00000000..a00ba1eb --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promauto/auto.go @@ -0,0 +1,223 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promauto provides constructors for the usual Prometheus metrics that +// return them already registered with the global registry +// (prometheus.DefaultRegisterer). This allows very compact code, avoiding any +// references to the registry altogether, but all the constructors in this +// package will panic if the registration fails. +// +// The following example is a complete program to create a histogram of normally +// distributed random numbers from the math/rand package: +// +// package main +// +// import ( +// "math/rand" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// +// func Random() { +// for { +// histogram.Observe(rand.NormFloat64()) +// } +// } +// +// func main() { +// go Random() +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) +// } +// +// Prometheus's version of a minimal hello-world program: +// +// package main +// +// import ( +// "fmt" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// func main() { +// http.Handle("/", promhttp.InstrumentHandlerCounter( +// promauto.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hello_requests_total", +// Help: "Total number of hello-world requests by HTTP code.", +// }, +// []string{"code"}, +// ), +// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// fmt.Fprint(w, "Hello, world!") +// }), +// )) +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) +// } +// +// This appears very handy. So why are these constructors locked away in a +// separate package? There are two caveats: +// +// First, in more complex programs, global state is often quite problematic. +// That's the reason why the metrics constructors in the prometheus package do +// not interact with the global prometheus.DefaultRegisterer on their own. You +// are free to use the Register or MustRegister functions to register them with +// the global prometheus.DefaultRegisterer, but you could as well choose a local +// Registerer (usually created with prometheus.NewRegistry, but there are other +// scenarios, e.g. testing). 
+// +// The second issue is that registration may fail, e.g. if a metric inconsistent +// with the newly to be registered one is already registered. But how to signal +// and handle a panic in the automatic registration with the default registry? +// The only way is panicking. While panicking on invalid input provided by the +// programmer is certainly fine, things are a bit more subtle in this case: You +// might just add another package to the program, and that package (in its init +// function) happens to register a metric with the same name as your code. Now, +// all of a sudden, either your code or the code of the newly imported package +// panics, depending on initialization order, without any opportunity to handle +// the case gracefully. Even worse is a scenario where registration happens +// later during the runtime (e.g. upon loading some kind of plugin), where the +// panic could be triggered long after the code has been deployed to +// production. A possibility to panic should be explicitly called out by the +// Must… idiom, cf. prometheus.MustRegister. But adding a separate set of +// constructors in the prometheus package called MustRegisterNewCounterVec or +// similar would be quite unwieldy. Adding an extra MustRegister method to each +// metric, returning the registered metric, would result in nice code for those +// using the method, but would pollute every single metric interface for +// everybody avoiding the global registry. +// +// To address both issues, the problematic auto-registering and possibly +// panicking constructors are all in this package with a clear warning +// ahead. And whoever cares about avoiding global state and possibly panicking +// function calls can simply ignore the existence of the promauto package +// altogether. +// +// A final note: There is a similar case in the net/http package of the standard +// library. It has DefaultServeMux as a global instance of ServeMux, and the +// Handle function acts on it, panicking if a handler for the same pattern has +// already been registered. However, one might argue that the whole HTTP routing +// is usually set up closely together in the same package or file, while +// Prometheus metrics tend to be spread widely over the codebase, increasing the +// chance of surprising registration failures. Furthermore, the use of global +// state in net/http has been criticized widely, and some avoid it altogether. +package promauto + +import "github.com/prometheus/client_golang/prometheus" + +// NewCounter works like the function of the same name in the prometheus package +// but it automatically registers the Counter with the +// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics. +func NewCounter(opts prometheus.CounterOpts) prometheus.Counter { + c := prometheus.NewCounter(opts) + prometheus.MustRegister(c) + return c +} + +// NewCounterVec works like the function of the same name in the prometheus +// package but it automatically registers the CounterVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec +// panics. +func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { + c := prometheus.NewCounterVec(opts, labelNames) + prometheus.MustRegister(c) + return c +} + +// NewCounterFunc works like the function of the same name in the prometheus +// package but it automatically registers the CounterFunc with the +// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc +// panics. 
+func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { + g := prometheus.NewCounterFunc(opts, function) + prometheus.MustRegister(g) + return g +} + +// NewGauge works like the function of the same name in the prometheus package +// but it automatically registers the Gauge with the +// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics. +func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { + g := prometheus.NewGauge(opts) + prometheus.MustRegister(g) + return g +} + +// NewGaugeVec works like the function of the same name in the prometheus +// package but it automatically registers the GaugeVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics. +func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { + g := prometheus.NewGaugeVec(opts, labelNames) + prometheus.MustRegister(g) + return g +} + +// NewGaugeFunc works like the function of the same name in the prometheus +// package but it automatically registers the GaugeFunc with the +// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics. +func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { + g := prometheus.NewGaugeFunc(opts, function) + prometheus.MustRegister(g) + return g +} + +// NewSummary works like the function of the same name in the prometheus package +// but it automatically registers the Summary with the +// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics. +func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { + s := prometheus.NewSummary(opts) + prometheus.MustRegister(s) + return s +} + +// NewSummaryVec works like the function of the same name in the prometheus +// package but it automatically registers the SummaryVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec +// panics. +func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { + s := prometheus.NewSummaryVec(opts, labelNames) + prometheus.MustRegister(s) + return s +} + +// NewHistogram works like the function of the same name in the prometheus +// package but it automatically registers the Histogram with the +// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics. +func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { + h := prometheus.NewHistogram(opts) + prometheus.MustRegister(h) + return h +} + +// NewHistogramVec works like the function of the same name in the prometheus +// package but it automatically registers the HistogramVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec +// panics. +func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { + h := prometheus.NewHistogramVec(opts, labelNames) + prometheus.MustRegister(h) + return h +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/delegator.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/delegator.go new file mode 100644 index 00000000..9c1c66dc --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/delegator.go @@ -0,0 +1,199 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) + if r.observeWriteHeader != nil { + r.observeWriteHeader(code) + } +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } + +func (d *closeNotifierDelegator) CloseNotify() <-chan bool { + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d *flusherDelegator) Flush() { + d.ResponseWriter.(http.Flusher).Flush() +} +func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return &closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return &flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return &hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, &readerFromDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/delegator_1_8.go 
b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/delegator_1_8.go new file mode 100644 index 00000000..75a905e2 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/delegator_1_8.go @@ -0,0 +1,181 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +type pusherDelegator struct{ *responseWriterDelegator } + +func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +func init() { + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return &pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, &pusherDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, &pusherDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, &pusherDelegator{d}, &readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, &pusherDelegator{d}, 
&readerFromDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/delegator_pre_1_8.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/delegator_pre_1_8.go new file mode 100644 index 00000000..8bb9b8b6 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/delegator_pre_1_8.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + + return pickDelegator[id](d) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/http.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/http.go new file mode 100644 index 00000000..01357374 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/http.go @@ -0,0 +1,311 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. +package promhttp + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. 
+// +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. +// +// This function is meant to cover the bulk of basic use cases. If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. +func Handler() http.Handler { + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + var inFlightSem chan struct{} + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + + h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. + defer func() { <-inFlightSem }() + default: + http.Error(w, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } + + mfs, err := reg.Gather() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf, opts.DisableCompression) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding metric family:", err) + } + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + // Handled later. 
+ case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil { + opts.ErrorLog.Println("error while sending encoded metrics:", err) + } + // TODO(beorn7): Consider streaming serving of metrics. + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. 
+ HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. It is recommended to at least + // log errors (by providing an ErrorLog in HandlerOpts) to not mask + // errors completely. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool + // The number of concurrent HTTP requests is limited to + // MaxRequestsInFlight. Additional requests are responded to with 503 + // Service Unavailable and a suitable message in the body. If + // MaxRequestsInFlight is 0 or negative, no limit is applied. + MaxRequestsInFlight int + // If handling a request takes longer than Timeout, it is responded to + // with 503 ServiceUnavailable and a suitable Message. No timeout is + // applied if Timeout is 0 or negative. Note that with the current + // implementation, reaching the timeout simply ends the HTTP requests as + // described above (and even that only if sending of the body hasn't + // started yet), while the bulk work of gathering all the metrics keeps + // running in the background (with the eventual result to be thrown + // away). Until the implementation is improved, it is recommended to + // implement a separate timeout in potentially slow Collectors. + Timeout time.Duration +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). 
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { + if compressionDisabled { + return writer, "" + } + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/http_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/http_test.go new file mode 100644 index 00000000..24d2f8cf --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/http_test.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bytes" + "errors" + "log" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type errorCollector struct{} + +func (e errorCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil) +} + +func (e errorCollector) Collect(ch chan<- prometheus.Metric) { + ch <- prometheus.NewInvalidMetric( + prometheus.NewDesc("invalid_metric", "not helpful", nil, nil), + errors.New("collect error"), + ) +} + +type blockingCollector struct { + CollectStarted, Block chan struct{} +} + +func (b blockingCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- prometheus.NewDesc("dummy_desc", "not helpful", nil, nil) +} + +func (b blockingCollector) Collect(ch chan<- prometheus.Metric) { + select { + case b.CollectStarted <- struct{}{}: + default: + } + // Collects nothing, just waits for a channel receive. + <-b.Block +} + +func TestHandlerErrorHandling(t *testing.T) { + + // Create a registry that collects a MetricFamily with two elements, + // another with one, and reports an error. + reg := prometheus.NewRegistry() + + cnt := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "the_count", + Help: "Ah-ah-ah! 
Thunder and lightning!", + }) + reg.MustRegister(cnt) + + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + reg.MustRegister(cntVec) + + reg.MustRegister(errorCollector{}) + + logBuf := &bytes.Buffer{} + logger := log.New(logBuf, "", 0) + + writer := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + errorHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: HTTPErrorOnError, + }) + continueHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: ContinueOnError, + }) + panicHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: PanicOnError, + }) + wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error +` + wantErrorBody := `An error has occurred during metrics gathering: + +error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error +` + wantOKBody := `# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +# HELP the_count Ah-ah-ah! Thunder and lightning! +# TYPE the_count counter +the_count 0 +` + + errorHandler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusInternalServerError; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got := logBuf.String(); got != wantMsg { + t.Errorf("got log message:\n%s\nwant log message:\n%s\n", got, wantMsg) + } + if got := writer.Body.String(); got != wantErrorBody { + t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody) + } + logBuf.Reset() + writer.Body.Reset() + writer.Code = http.StatusOK + + continueHandler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got := logBuf.String(); got != wantMsg { + t.Errorf("got log message %q, want %q", got, wantMsg) + } + if got := writer.Body.String(); got != wantOKBody { + t.Errorf("got body %q, want %q", got, wantOKBody) + } + + defer func() { + if err := recover(); err == nil { + t.Error("expected panic from panicHandler") + } + }() + panicHandler.ServeHTTP(writer, request) +} + +func TestInstrumentMetricHandler(t *testing.T) { + reg := prometheus.NewRegistry() + handler := InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{})) + // Do it again to test idempotency. 
+ InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{})) + writer := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + handler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + + want := "promhttp_metric_handler_requests_in_flight 1\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } + want = "promhttp_metric_handler_requests_total{code=\"200\"} 0\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } + + writer.Body.Reset() + handler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + + want = "promhttp_metric_handler_requests_in_flight 1\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } + want = "promhttp_metric_handler_requests_total{code=\"200\"} 1\n" + if got := writer.Body.String(); !strings.Contains(got, want) { + t.Errorf("got body %q, does not contain %q", got, want) + } +} + +func TestHandlerMaxRequestsInFlight(t *testing.T) { + reg := prometheus.NewRegistry() + handler := HandlerFor(reg, HandlerOpts{MaxRequestsInFlight: 1}) + w1 := httptest.NewRecorder() + w2 := httptest.NewRecorder() + w3 := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)} + reg.MustRegister(c) + + rq1Done := make(chan struct{}) + go func() { + handler.ServeHTTP(w1, request) + close(rq1Done) + }() + <-c.CollectStarted + + handler.ServeHTTP(w2, request) + + if got, want := w2.Code, http.StatusServiceUnavailable; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got, want := w2.Body.String(), "Limit of concurrent requests reached (1), try again later.\n"; got != want { + t.Errorf("got body %q, want %q", got, want) + } + + close(c.Block) + <-rq1Done + + handler.ServeHTTP(w3, request) + + if got, want := w3.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } +} + +func TestHandlerTimeout(t *testing.T) { + reg := prometheus.NewRegistry() + handler := HandlerFor(reg, HandlerOpts{Timeout: time.Millisecond}) + w := httptest.NewRecorder() + + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)} + reg.MustRegister(c) + + handler.ServeHTTP(w, request) + + if got, want := w.Code, http.StatusServiceUnavailable; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got, want := w.Body.String(), "Exceeded configured timeout of 1ms.\n"; got != want { + t.Errorf("got body %q, want %q", got, want) + } + + close(c.Block) // To not leak a goroutine. 
+} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_client.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_client.go new file mode 100644 index 00000000..86fd5644 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_client.go @@ -0,0 +1,97 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// The RoundTripperFunc type is an adapter to allow the use of ordinary +// functions as RoundTrippers. If f is a function with the appropriate +// signature, RountTripperFunc(f) is a RoundTripper that calls f. +type RoundTripperFunc func(req *http.Request) (*http.Response, error) + +// RoundTrip implements the RoundTripper interface. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +// InstrumentRoundTripperInFlight is a middleware that wraps the provided +// http.RoundTripper. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.RoundTripper. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + gauge.Inc() + defer gauge.Dec() + return next.RoundTrip(r) + }) +} + +// InstrumentRoundTripperCounter is a middleware that wraps the provided +// http.RoundTripper to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. Partitioning of the CounterVec happens by HTTP status code +// and/or HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped RoundTripper panics or returns a non-nil error, the Counter +// is not incremented. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(counter) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + resp, err := next.RoundTrip(r) + if err == nil { + counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() + } + return resp, err + }) +} + +// InstrumentRoundTripperDuration is a middleware that wraps the provided +// http.RoundTripper to observe the request duration with the provided +// ObserverVec. The ObserverVec must have zero, one, or two non-const +// non-curried labels. 
For those, the only allowed label names are "code" and +// "method". The function panics otherwise. The Observe method of the Observer +// in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped RoundTripper panics or returns a non-nil error, no values are +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(obs) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + resp, err := next.RoundTrip(r) + if err == nil { + obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) + } + return resp, err + }) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_client_1_8.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_client_1_8.go new file mode 100644 index 00000000..a034d1ec --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_client_1_8.go @@ -0,0 +1,144 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package promhttp + +import ( + "context" + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" +) + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) 
Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_client_1_8_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_client_1_8_test.go new file mode 100644 index 00000000..7e3f5229 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_client_1_8_test.go @@ -0,0 +1,195 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.8 + +package promhttp + +import ( + "log" + "net/http" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestClientMiddlewareAPI(t *testing.T) { + client := http.DefaultClient + client.Timeout = 1 * time.Second + + reg := prometheus.NewRegistry() + + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "client_in_flight_requests", + Help: "A gauge of in-flight requests for the wrapped client.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "client_api_requests_total", + Help: "A counter for requests from the wrapped client.", + }, + []string{"code", "method"}, + ) + + dnsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "dns_duration_seconds", + Help: "Trace dns latency histogram.", + Buckets: []float64{.005, .01, .025, .05}, + }, + []string{"event"}, + ) + + tlsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "tls_duration_seconds", + Help: "Trace tls latency histogram.", + Buckets: []float64{.05, .1, .25, .5}, + }, + []string{"event"}, + ) + + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "A histogram of request latencies.", + Buckets: prometheus.DefBuckets, + }, + []string{"method"}, + ) + + reg.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge) + + trace := &InstrumentTrace{ + DNSStart: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_start") + }, + DNSDone: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_done") + }, + TLSHandshakeStart: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_start") + }, + TLSHandshakeDone: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_done") + }, + } + + client.Transport = InstrumentRoundTripperInFlight(inFlightGauge, + InstrumentRoundTripperCounter(counter, + InstrumentRoundTripperTrace(trace, + InstrumentRoundTripperDuration(histVec, http.DefaultTransport), + ), + ), + ) + + resp, err := client.Get("http://google.com") + if err != nil { + t.Fatalf("%v", err) + } + defer resp.Body.Close() +} + +func ExampleInstrumentRoundTripperDuration() { + client := http.DefaultClient + client.Timeout = 1 * time.Second + + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "client_in_flight_requests", + Help: "A gauge of in-flight requests for the wrapped client.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "client_api_requests_total", + Help: "A counter for requests from the wrapped client.", + }, + []string{"code", "method"}, + ) + + // dnsLatencyVec uses custom buckets based on expected dns durations. + // It has an instance label "event", which is set in the + // DNSStart and DNSDonehook functions defined in the + // InstrumentTrace struct below. + dnsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "dns_duration_seconds", + Help: "Trace dns latency histogram.", + Buckets: []float64{.005, .01, .025, .05}, + }, + []string{"event"}, + ) + + // tlsLatencyVec uses custom buckets based on expected tls durations. + // It has an instance label "event", which is set in the + // TLSHandshakeStart and TLSHandshakeDone hook functions defined in the + // InstrumentTrace struct below. 
+ tlsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "tls_duration_seconds", + Help: "Trace tls latency histogram.", + Buckets: []float64{.05, .1, .25, .5}, + }, + []string{"event"}, + ) + + // histVec has no labels, making it a zero-dimensional ObserverVec. + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "A histogram of request latencies.", + Buckets: prometheus.DefBuckets, + }, + []string{}, + ) + + // Register all of the metrics in the standard registry. + prometheus.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge) + + // Define functions for the available httptrace.ClientTrace hook + // functions that we want to instrument. + trace := &InstrumentTrace{ + DNSStart: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_start") + }, + DNSDone: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_done") + }, + TLSHandshakeStart: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_start") + }, + TLSHandshakeDone: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_done") + }, + } + + // Wrap the default RoundTripper with middleware. + roundTripper := InstrumentRoundTripperInFlight(inFlightGauge, + InstrumentRoundTripperCounter(counter, + InstrumentRoundTripperTrace(trace, + InstrumentRoundTripperDuration(histVec, http.DefaultTransport), + ), + ), + ) + + // Set the RoundTripper on our client. + client.Transport = roundTripper + + resp, err := client.Get("http://google.com") + if err != nil { + log.Printf("error: %v", err) + } + defer resp.Body.Close() +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_server.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_server.go new file mode 100644 index 00000000..9db24380 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_server.go @@ -0,0 +1,447 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have zero, one, or two non-const non-curried labels. For those, the only +// allowed label names are "code" and "method". The function panics +// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or +// HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. 
The Observe method of +// the Observer in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the response size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. 
+// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. + if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString || isLabelCurried(c, name) { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + } + return +} + +func isLabelCurried(c prometheus.Collector, label string) bool { + // This is even hackier than the label test above. + // We essentially try to curry again and see if it works. + // But for that, we need to type-convert to the two + // types we use here, ObserverVec or *CounterVec. + switch v := c.(type) { + case *prometheus.CounterVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + case prometheus.ObserverVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + default: + panic("unsupported metric vec type") + } + return true +} + +// emptyLabels is a one-time allocation for non-partitioned metrics to avoid +// unnecessary allocations on each request. +var emptyLabels = prometheus.Labels{} + +func labels(code, method bool, reqMethod string, status int) prometheus.Labels { + if !(code || method) { + return emptyLabels + } + labels := prometheus.Labels{} + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, santizeCode will return 200, for consistency with behavior in +// the stdlib. +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_server_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_server_test.go new file mode 100644 index 00000000..716c6f45 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/promhttp/instrument_server_test.go @@ -0,0 +1,401 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package promhttp + +import ( + "io" + "log" + "net/http" + "net/http/httptest" + "testing" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestLabelCheck(t *testing.T) { + scenarios := map[string]struct { + varLabels []string + constLabels []string + curriedLabels []string + ok bool + }{ + "empty": { + varLabels: []string{}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: true, + }, + "code as single var label": { + varLabels: []string{"code"}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: true, + }, + "method as single var label": { + varLabels: []string{"method"}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: true, + }, + "cade and method as var labels": { + varLabels: []string{"method", "code"}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: true, + }, + "valid case with all labels used": { + varLabels: []string{"code", "method"}, + constLabels: []string{"foo", "bar"}, + curriedLabels: []string{"dings", "bums"}, + ok: true, + }, + "unsupported var label": { + varLabels: []string{"foo"}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: false, + }, + "mixed var labels": { + varLabels: []string{"method", "foo", "code"}, + constLabels: []string{}, + curriedLabels: []string{}, + ok: false, + }, + "unsupported var label but curried": { + varLabels: []string{}, + constLabels: []string{}, + curriedLabels: []string{"foo"}, + ok: true, + }, + "mixed var labels but unsupported curried": { + varLabels: []string{"code", "method"}, + constLabels: []string{}, + curriedLabels: []string{"foo"}, + ok: true, + }, + "supported label as const and curry": { + varLabels: []string{}, + constLabels: []string{"code"}, + curriedLabels: []string{"method"}, + ok: true, + }, + "supported label as const and curry with unsupported as var": { + varLabels: []string{"foo"}, + constLabels: []string{"code"}, + curriedLabels: []string{"method"}, + ok: false, + }, + } + + for name, sc := range scenarios { + t.Run(name, func(t *testing.T) { + constLabels := prometheus.Labels{} + for _, l := range sc.constLabels { + constLabels[l] = "dummy" + } + c := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "c", + Help: "c help", + ConstLabels: constLabels, + }, + append(sc.varLabels, sc.curriedLabels...), + ) + o := prometheus.ObserverVec(prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "c", + Help: "c help", + ConstLabels: constLabels, + }, + append(sc.varLabels, sc.curriedLabels...), + )) + for _, l := range sc.curriedLabels { + c = c.MustCurryWith(prometheus.Labels{l: "dummy"}) + o = o.MustCurryWith(prometheus.Labels{l: "dummy"}) + } + + func() { + defer func() { + if err := recover(); err != nil { + if sc.ok { + t.Error("unexpected panic:", err) + } + } else if !sc.ok { + t.Error("expected panic") + } + }() + InstrumentHandlerCounter(c, nil) + }() + func() { + defer func() { + if err := recover(); err != nil { + if sc.ok { + t.Error("unexpected panic:", err) + } + } else if !sc.ok { + t.Error("expected panic") + } + }() + InstrumentHandlerDuration(o, nil) + }() + if sc.ok { + // Test if wantCode and wantMethod were detected correctly. 
+ var wantCode, wantMethod bool + for _, l := range sc.varLabels { + if l == "code" { + wantCode = true + } + if l == "method" { + wantMethod = true + } + } + gotCode, gotMethod := checkLabels(c) + if gotCode != wantCode { + t.Errorf("wanted code=%t for counter, got code=%t", wantCode, gotCode) + } + if gotMethod != wantMethod { + t.Errorf("wanted method=%t for counter, got method=%t", wantMethod, gotMethod) + } + gotCode, gotMethod = checkLabels(o) + if gotCode != wantCode { + t.Errorf("wanted code=%t for observer, got code=%t", wantCode, gotCode) + } + if gotMethod != wantMethod { + t.Errorf("wanted method=%t for observer, got method=%t", wantMethod, gotMethod) + } + } + }) + } +} + +func TestMiddlewareAPI(t *testing.T) { + reg := prometheus.NewRegistry() + + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "in_flight_requests", + Help: "A gauge of requests currently being served by the wrapped handler.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "api_requests_total", + Help: "A counter for requests to the wrapped handler.", + }, + []string{"code", "method"}, + ) + + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "response_duration_seconds", + Help: "A histogram of request latencies.", + Buckets: prometheus.DefBuckets, + ConstLabels: prometheus.Labels{"handler": "api"}, + }, + []string{"method"}, + ) + + writeHeaderVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "write_header_duration_seconds", + Help: "A histogram of time to first write latencies.", + Buckets: prometheus.DefBuckets, + ConstLabels: prometheus.Labels{"handler": "api"}, + }, + []string{}, + ) + + responseSize := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "push_request_size_bytes", + Help: "A histogram of request sizes for requests.", + Buckets: []float64{200, 500, 900, 1500}, + }, + []string{}, + ) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("OK")) + }) + + reg.MustRegister(inFlightGauge, counter, histVec, responseSize, writeHeaderVec) + + chain := InstrumentHandlerInFlight(inFlightGauge, + InstrumentHandlerCounter(counter, + InstrumentHandlerDuration(histVec, + InstrumentHandlerTimeToWriteHeader(writeHeaderVec, + InstrumentHandlerResponseSize(responseSize, handler), + ), + ), + ), + ) + + r, _ := http.NewRequest("GET", "www.example.com", nil) + w := httptest.NewRecorder() + chain.ServeHTTP(w, r) +} + +func TestInstrumentTimeToFirstWrite(t *testing.T) { + var i int + dobs := &responseWriterDelegator{ + ResponseWriter: httptest.NewRecorder(), + observeWriteHeader: func(status int) { + i = status + }, + } + d := newDelegator(dobs, nil) + + d.WriteHeader(http.StatusOK) + + if i != http.StatusOK { + t.Fatalf("failed to execute observeWriteHeader") + } +} + +// testResponseWriter is an http.ResponseWriter that also implements +// http.CloseNotifier, http.Flusher, and io.ReaderFrom. 
+type testResponseWriter struct { + closeNotifyCalled, flushCalled, readFromCalled bool +} + +func (t *testResponseWriter) Header() http.Header { return nil } +func (t *testResponseWriter) Write([]byte) (int, error) { return 0, nil } +func (t *testResponseWriter) WriteHeader(int) {} +func (t *testResponseWriter) CloseNotify() <-chan bool { + t.closeNotifyCalled = true + return nil +} +func (t *testResponseWriter) Flush() { t.flushCalled = true } +func (t *testResponseWriter) ReadFrom(io.Reader) (int64, error) { + t.readFromCalled = true + return 0, nil +} + +// testFlusher is an http.ResponseWriter that also implements http.Flusher. +type testFlusher struct { + flushCalled bool +} + +func (t *testFlusher) Header() http.Header { return nil } +func (t *testFlusher) Write([]byte) (int, error) { return 0, nil } +func (t *testFlusher) WriteHeader(int) {} +func (t *testFlusher) Flush() { t.flushCalled = true } + +func TestInterfaceUpgrade(t *testing.T) { + w := &testResponseWriter{} + d := newDelegator(w, nil) + d.(http.CloseNotifier).CloseNotify() + if !w.closeNotifyCalled { + t.Error("CloseNotify not called") + } + d.(http.Flusher).Flush() + if !w.flushCalled { + t.Error("Flush not called") + } + d.(io.ReaderFrom).ReadFrom(nil) + if !w.readFromCalled { + t.Error("ReadFrom not called") + } + if _, ok := d.(http.Hijacker); ok { + t.Error("delegator unexpectedly implements http.Hijacker") + } + + f := &testFlusher{} + d = newDelegator(f, nil) + if _, ok := d.(http.CloseNotifier); ok { + t.Error("delegator unexpectedly implements http.CloseNotifier") + } + d.(http.Flusher).Flush() + if !w.flushCalled { + t.Error("Flush not called") + } + if _, ok := d.(io.ReaderFrom); ok { + t.Error("delegator unexpectedly implements io.ReaderFrom") + } + if _, ok := d.(http.Hijacker); ok { + t.Error("delegator unexpectedly implements http.Hijacker") + } +} + +func ExampleInstrumentHandlerDuration() { + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "in_flight_requests", + Help: "A gauge of requests currently being served by the wrapped handler.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "api_requests_total", + Help: "A counter for requests to the wrapped handler.", + }, + []string{"code", "method"}, + ) + + // duration is partitioned by the HTTP method and handler. It uses custom + // buckets based on the expected request duration. + duration := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "A histogram of latencies for requests.", + Buckets: []float64{.25, .5, 1, 2.5, 5, 10}, + }, + []string{"handler", "method"}, + ) + + // responseSize has no labels, making it a zero-dimensional + // ObserverVec. + responseSize := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "response_size_bytes", + Help: "A histogram of response sizes for requests.", + Buckets: []float64{200, 500, 900, 1500}, + }, + []string{}, + ) + + // Create the handlers that will be wrapped by the middleware. + pushHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Push")) + }) + pullHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Pull")) + }) + + // Register all of the metrics in the standard registry. + prometheus.MustRegister(inFlightGauge, counter, duration, responseSize) + + // Instrument the handlers with all the metrics, injecting the "handler" + // label by currying. 
+ pushChain := InstrumentHandlerInFlight(inFlightGauge, + InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "push"}), + InstrumentHandlerCounter(counter, + InstrumentHandlerResponseSize(responseSize, pushHandler), + ), + ), + ) + pullChain := InstrumentHandlerInFlight(inFlightGauge, + InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "pull"}), + InstrumentHandlerCounter(counter, + InstrumentHandlerResponseSize(responseSize, pullHandler), + ), + ), + ) + + http.Handle("/metrics", Handler()) + http.Handle("/push", pushChain) + http.Handle("/pull", pullChain) + + if err := http.ListenAndServe(":3000", nil); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/deprecated.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/deprecated.go new file mode 100644 index 00000000..3d62b572 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/deprecated.go @@ -0,0 +1,172 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package push + +// This file contains only deprecated code. Remove after v0.9 is released. + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "github.com/prometheus/client_golang/prometheus" +) + +// FromGatherer triggers a metric collection by the provided Gatherer (which is +// usually implemented by a prometheus.Registry) and pushes all gathered metrics +// to the Pushgateway specified by url, using the provided job name and the +// (optional) further grouping labels (the grouping map may be nil). See the +// Pushgateway documentation for detailed implications of the job and other +// grouping labels. Neither the job name nor any grouping label value may +// contain a "/". The metrics pushed must not contain a job label of their own +// nor any of the grouping labels. +// +// You can use just host:port or ip:port as url, in which case 'http://' is +// added automatically. You can also include the schema in the URL. However, do +// not include the '/metrics/jobs/...' part. +// +// Note that all previously pushed metrics with the same job and other grouping +// labels will be replaced with the metrics pushed by this call. (It uses HTTP +// method 'PUT' to push to the Pushgateway.) +// +// Deprecated: Please use a Pusher created with New instead. +func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { + return push(job, grouping, url, g, "PUT") +} + +// AddFromGatherer works like FromGatherer, but only previously pushed metrics +// with the same name (and the same job and other grouping labels) will be +// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) +// +// Deprecated: Please use a Pusher created with New instead. 
+func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { + return push(job, grouping, url, g, "POST") +} + +func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error { + if !strings.Contains(pushURL, "://") { + pushURL = "http://" + pushURL + } + if strings.HasSuffix(pushURL, "/") { + pushURL = pushURL[:len(pushURL)-1] + } + + if strings.Contains(job, "/") { + return fmt.Errorf("job contains '/': %s", job) + } + urlComponents := []string{url.QueryEscape(job)} + for ln, lv := range grouping { + if !model.LabelName(ln).IsValid() { + return fmt.Errorf("grouping label has invalid name: %s", ln) + } + if strings.Contains(lv, "/") { + return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv) + } + urlComponents = append(urlComponents, ln, lv) + } + pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/")) + + mfs, err := g.Gather() + if err != nil { + return err + } + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + // Check for pre-existing grouping labels: + for _, mf := range mfs { + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if l.GetName() == "job" { + return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) + } + if _, ok := grouping[l.GetName()]; ok { + return fmt.Errorf( + "pushed metric %s (%s) already contains grouping label %s", + mf.GetName(), m, l.GetName(), + ) + } + } + } + enc.Encode(mf) + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. + return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body) + } + return nil +} + +// Collectors works like FromGatherer, but it does not use a Gatherer. Instead, +// it collects from the provided collectors directly. It is a convenient way to +// push only a few metrics. +// +// Deprecated: Please use a Pusher created with New instead. +func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { + return pushCollectors(job, grouping, url, "PUT", collectors...) +} + +// AddCollectors works like AddFromGatherer, but it does not use a Gatherer. +// Instead, it collects from the provided collectors directly. It is a +// convenient way to push only a few metrics. +// +// Deprecated: Please use a Pusher created with New instead. +func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { + return pushCollectors(job, grouping, url, "POST", collectors...) +} + +func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error { + r := prometheus.NewRegistry() + for _, collector := range collectors { + if err := r.Register(collector); err != nil { + return err + } + } + return push(job, grouping, url, r, method) +} + +// HostnameGroupingKey returns a label map with the only entry +// {instance=""}. This can be conveniently used as the grouping +// parameter if metrics should be pushed with the hostname as label. 
The +// returned map is created upon each call so that the caller is free to add more +// labels to the map. +// +// Deprecated: Usually, metrics pushed to the Pushgateway should not be +// host-centric. (You would use https://github.com/prometheus/node_exporter in +// that case.) If you have the need to add the hostname to the grouping key, you +// are probably doing something wrong. See +// https://prometheus.io/docs/practices/pushing/ for details. +func HostnameGroupingKey() map[string]string { + hostname, err := os.Hostname() + if err != nil { + return map[string]string{"instance": "unknown"} + } + return map[string]string{"instance": hostname} +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/example_add_from_gatherer_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/example_add_from_gatherer_test.go new file mode 100644 index 00000000..dd22b526 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/example_add_from_gatherer_test.go @@ -0,0 +1,80 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package push_test + +import ( + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/push" +) + +var ( + completionTime = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_completion_timestamp_seconds", + Help: "The timestamp of the last completion of a DB backup, successful or not.", + }) + successTime = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_success_timestamp_seconds", + Help: "The timestamp of the last successful completion of a DB backup.", + }) + duration = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_duration_seconds", + Help: "The duration of the last DB backup in seconds.", + }) + records = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_records_processed", + Help: "The number of records processed in the last DB backup.", + }) +) + +func performBackup() (int, error) { + // Perform the backup and return the number of backed up records and any + // applicable error. + // ... + return 42, nil +} + +func ExamplePusher_Add() { + // We use a registry here to benefit from the consistency checks that + // happen during registration. + registry := prometheus.NewRegistry() + registry.MustRegister(completionTime, duration, records) + // Note that successTime is not registered. + + pusher := push.New("http://pushgateway:9091", "db_backup").Gatherer(registry) + + start := time.Now() + n, err := performBackup() + records.Set(float64(n)) + // Note that time.Since only uses a monotonic clock in Go1.9+. + duration.Set(time.Since(start).Seconds()) + completionTime.SetToCurrentTime() + if err != nil { + fmt.Println("DB backup failed:", err) + } else { + // Add successTime to pusher only in case of success. + // We could as well register it with the registry. 
+ // This example, however, demonstrates that you can + // mix Gatherers and Collectors when handling a Pusher. + pusher.Collector(successTime) + successTime.SetToCurrentTime() + } + // Add is used here rather than Push to not delete a previously pushed + // success timestamp in case of a failure of this backup. + if err := pusher.Add(); err != nil { + fmt.Println("Could not push to Pushgateway:", err) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/examples_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/examples_test.go new file mode 100644 index 00000000..fa5549a9 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/examples_test.go @@ -0,0 +1,35 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package push_test + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/push" +) + +func ExamplePusher_Push() { + completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_completion_timestamp_seconds", + Help: "The timestamp of the last successful completion of a DB backup.", + }) + completionTime.SetToCurrentTime() + if err := push.New("http://pushgateway:9091", "db_backup"). + Collector(completionTime). + Grouping("db", "customers"). + Push(); err != nil { + fmt.Println("Could not push completion time to Pushgateway:", err) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/push.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/push.go new file mode 100644 index 00000000..3721ff19 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/push.go @@ -0,0 +1,236 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package push provides functions to push metrics to a Pushgateway. It uses a +// builder approach. Create a Pusher with New and then add the various options +// by using its methods, finally calling Add or Push, like this: +// +// // Easy case: +// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() +// +// // Complex case: +// push.New("http://example.org/metrics", "my_job"). +// Collector(myCollector1). +// Collector(myCollector2). +// Grouping("zone", "xy"). +// Client(&myHTTPClient). 
+// BasicAuth("top", "secret"). +// Add() +// +// See the examples section for more detailed examples. +// +// See the documentation of the Pushgateway to understand the meaning of +// the grouping key and the differences between Push and Add: +// https://github.com/prometheus/pushgateway +package push + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "github.com/prometheus/client_golang/prometheus" +) + +const contentTypeHeader = "Content-Type" + +// Pusher manages a push to the Pushgateway. Use New to create one, configure it +// with its methods, and finally use the Add or Push method to push. +type Pusher struct { + error error + + url, job string + grouping map[string]string + + gatherers prometheus.Gatherers + registerer prometheus.Registerer + + client *http.Client + useBasicAuth bool + username, password string +} + +// New creates a new Pusher to push to the provided URL with the provided job +// name. You can use just host:port or ip:port as url, in which case “http://” +// is added automatically. Alternatively, include the schema in the +// URL. However, do not include the “/metrics/jobs/…” part. +// +// Note that until https://github.com/prometheus/pushgateway/issues/97 is +// resolved, a “/” character in the job name is prohibited. +func New(url, job string) *Pusher { + var ( + reg = prometheus.NewRegistry() + err error + ) + if !strings.Contains(url, "://") { + url = "http://" + url + } + if strings.HasSuffix(url, "/") { + url = url[:len(url)-1] + } + if strings.Contains(job, "/") { + err = fmt.Errorf("job contains '/': %s", job) + } + + return &Pusher{ + error: err, + url: url, + job: job, + grouping: map[string]string{}, + gatherers: prometheus.Gatherers{reg}, + registerer: reg, + client: &http.Client{}, + } +} + +// Push collects/gathers all metrics from all Collectors and Gatherers added to +// this Pusher. Then, it pushes them to the Pushgateway configured while +// creating this Pusher, using the configured job name and any added grouping +// labels as grouping key. All previously pushed metrics with the same job and +// other grouping labels will be replaced with the metrics pushed by this +// call. (It uses HTTP method “PUT” to push to the Pushgateway.) +// +// Push returns the first error encountered by any method call (including this +// one) in the lifetime of the Pusher. +func (p *Pusher) Push() error { + return p.push("PUT") +} + +// Add works like push, but only previously pushed metrics with the same name +// (and the same job and other grouping labels) will be replaced. (It uses HTTP +// method “POST” to push to the Pushgateway.) +func (p *Pusher) Add() error { + return p.push("POST") +} + +// Gatherer adds a Gatherer to the Pusher, from which metrics will be gathered +// to push them to the Pushgateway. The gathered metrics must not contain a job +// label of their own. +// +// For convenience, this method returns a pointer to the Pusher itself. +func (p *Pusher) Gatherer(g prometheus.Gatherer) *Pusher { + p.gatherers = append(p.gatherers, g) + return p +} + +// Collector adds a Collector to the Pusher, from which metrics will be +// collected to push them to the Pushgateway. The collected metrics must not +// contain a job label of their own. +// +// For convenience, this method returns a pointer to the Pusher itself. 
+func (p *Pusher) Collector(c prometheus.Collector) *Pusher { + if p.error == nil { + p.error = p.registerer.Register(c) + } + return p +} + +// Grouping adds a label pair to the grouping key of the Pusher, replacing any +// previously added label pair with the same label name. Note that setting any +// labels in the grouping key that are already contained in the metrics to push +// will lead to an error. +// +// For convenience, this method returns a pointer to the Pusher itself. +// +// Note that until https://github.com/prometheus/pushgateway/issues/97 is +// resolved, this method does not allow a “/” character in the label value. +func (p *Pusher) Grouping(name, value string) *Pusher { + if p.error == nil { + if !model.LabelName(name).IsValid() { + p.error = fmt.Errorf("grouping label has invalid name: %s", name) + return p + } + if strings.Contains(value, "/") { + p.error = fmt.Errorf("value of grouping label %s contains '/': %s", name, value) + return p + } + p.grouping[name] = value + } + return p +} + +// Client sets a custom HTTP client for the Pusher. For convenience, this method +// returns a pointer to the Pusher itself. +func (p *Pusher) Client(c *http.Client) *Pusher { + p.client = c + return p +} + +// BasicAuth configures the Pusher to use HTTP Basic Authentication with the +// provided username and password. For convenience, this method returns a +// pointer to the Pusher itself. +func (p *Pusher) BasicAuth(username, password string) *Pusher { + p.useBasicAuth = true + p.username = username + p.password = password + return p +} + +func (p *Pusher) push(method string) error { + if p.error != nil { + return p.error + } + urlComponents := []string{url.QueryEscape(p.job)} + for ln, lv := range p.grouping { + urlComponents = append(urlComponents, ln, lv) + } + pushURL := fmt.Sprintf("%s/metrics/job/%s", p.url, strings.Join(urlComponents, "/")) + + mfs, err := p.gatherers.Gather() + if err != nil { + return err + } + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + // Check for pre-existing grouping labels: + for _, mf := range mfs { + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if l.GetName() == "job" { + return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) + } + if _, ok := p.grouping[l.GetName()]; ok { + return fmt.Errorf( + "pushed metric %s (%s) already contains grouping label %s", + mf.GetName(), m, l.GetName(), + ) + } + } + } + enc.Encode(mf) + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + if p.useBasicAuth { + req.SetBasicAuth(p.username, p.password) + } + req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) + resp, err := p.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. 
+ return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body) + } + return nil +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/push_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/push_test.go new file mode 100644 index 00000000..34ec334b --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/push/push_test.go @@ -0,0 +1,194 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package push + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestPush(t *testing.T) { + + var ( + lastMethod string + lastBody []byte + lastPath string + ) + + // Fake a Pushgateway that always responds with 202. + pgwOK := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + lastMethod = r.Method + var err error + lastBody, err = ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + lastPath = r.URL.EscapedPath() + w.Header().Set("Content-Type", `text/plain; charset=utf-8`) + w.WriteHeader(http.StatusAccepted) + }), + ) + defer pgwOK.Close() + + // Fake a Pushgateway that always responds with 500. + pgwErr := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "fake error", http.StatusInternalServerError) + }), + ) + defer pgwErr.Close() + + metric1 := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "testname1", + Help: "testhelp1", + }) + metric2 := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testname2", + Help: "testhelp2", + ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"}, + }) + + reg := prometheus.NewRegistry() + reg.MustRegister(metric1) + reg.MustRegister(metric2) + + mfs, err := reg.Gather() + if err != nil { + t.Fatal(err) + } + + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + t.Fatal(err) + } + } + wantBody := buf.Bytes() + + // Push some Collectors, all good. + if err := New(pgwOK.URL, "testjob"). + Collector(metric1). + Collector(metric2). + Push(); err != nil { + t.Fatal(err) + } + if lastMethod != "PUT" { + t.Error("want method PUT for Push, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob" { + t.Error("unexpected path:", lastPath) + } + + // Add some Collectors, with nil grouping, all good. + if err := New(pgwOK.URL, "testjob"). + Collector(metric1). + Collector(metric2). 
+ Add(); err != nil { + t.Fatal(err) + } + if lastMethod != "POST" { + t.Error("want method POST for Add, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob" { + t.Error("unexpected path:", lastPath) + } + + // Push some Collectors with a broken PGW. + if err := New(pgwErr.URL, "testjob"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { + t.Error("push to broken Pushgateway succeeded") + } else { + if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want { + t.Errorf("got error %q, want %q", got, want) + } + } + + // Push some Collectors with invalid grouping or job. + if err := New(pgwOK.URL, "testjob"). + Grouping("foo", "bums"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { + t.Error("push with grouping contained in metrics succeeded") + } + if err := New(pgwOK.URL, "test/job"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { + t.Error("push with invalid job value succeeded") + } + if err := New(pgwOK.URL, "testjob"). + Grouping("foobar", "bu/ms"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { + t.Error("push with invalid grouping succeeded") + } + if err := New(pgwOK.URL, "testjob"). + Grouping("foo-bar", "bums"). + Collector(metric1). + Collector(metric2). + Push(); err == nil { + t.Error("push with invalid grouping succeeded") + } + + // Push registry, all good. + if err := New(pgwOK.URL, "testjob"). + Gatherer(reg). + Push(); err != nil { + t.Fatal(err) + } + if lastMethod != "PUT" { + t.Error("want method PUT for Push, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + + // Add registry, all good. + if err := New(pgwOK.URL, "testjob"). + Grouping("a", "x"). + Grouping("b", "y"). + Gatherer(reg). + Add(); err != nil { + t.Fatal(err) + } + if lastMethod != "POST" { + t.Error("want method POST for Add, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" { + t.Error("unexpected path:", lastPath) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/registry.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/registry.go new file mode 100644 index 00000000..1f9adb80 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/registry.go @@ -0,0 +1,808 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "bytes" + "errors" + "fmt" + "os" + "runtime" + "sort" + "sync" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { + return &Registry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + } +} + +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. +// +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. +func NewPedanticRegistry() *Registry { + r := NewRegistry() + r.pedanticChecksEnabled = true + return r +} + +// Registerer is the interface for the part of a registry in charge of +// registering and unregistering. Users of custom registries should use +// Registerer as type for registration purposes (rather than the Registry type +// directly). In that way, they are free to use custom Registerer implementation +// (e.g. for testing purposes). +type Registerer interface { + // Register registers a new Collector to be included in metrics + // collection. It returns an error if the descriptors provided by the + // Collector are invalid or if they — in combination with descriptors of + // already registered Collectors — do not fulfill the consistency and + // uniqueness criteria described in the documentation of metric.Desc. + // + // If the provided Collector is equal to a Collector already registered + // (which includes the case of re-registering the same Collector), the + // returned error is an instance of AlreadyRegisteredError, which + // contains the previously registered Collector. + // + // It is in general not safe to register the same Collector multiple + // times concurrently. 
+ Register(Collector) error + // MustRegister works like Register but registers any number of + // Collectors and panics upon the first registration that causes an + // error. + MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) The function + // returns whether a Collector was unregistered. + // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of MetricFamily protobufs. Even if an error occurs, Gather attempts + // to gather as many metrics as possible. Hence, if a non-nil error is + // returned, the returned MetricFamily slice could be nil (in case of a + // fatal error that prevented any meaningful metric collection) or + // contain a number of MetricFamily protobufs, some of which might be + // incomplete, and some might be missing altogether. The returned error + // (which might be a MultiError) explains the details. In scenarios + // where complete collection is critical, the returned MetricFamily + // protobufs should be disregarded if the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. 
This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. +type AlreadyRegisteredError struct { + ExistingCollector, NewCollector Collector +} + +func (err AlreadyRegisteredError) Error() string { + return "duplicate metrics collector registration attempted" +} + +// MultiError is a slice of errors implementing the error interface. It is used +// by a Gatherer to report multiple errors during MetricFamily gathering. +type MultiError []error + +func (errs MultiError) Error() string { + if len(errs) == 0 { + return "" + } + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) + for _, err := range errs { + fmt.Fprintf(buf, "\n* %s", err) + } + return buf.String() +} + +// Append appends the provided error if it is not nil. +func (errs *MultiError) Append(err error) { + if err != nil { + *errs = append(*errs, err) + } +} + +// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only +// contained error as error if len(errs is 1). In all other cases, it returns +// the MultiError directly. This is helpful for returning a MultiError in a way +// that only uses the MultiError if needed. +func (errs MultiError) MaybeUnwrap() error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return errs + } +} + +// Registry registers Prometheus collectors, collects their metrics, and gathers +// them into MetricFamilies for exposition. It implements both Registerer and +// Gatherer. The zero value is not usable. Create instances with NewRegistry or +// NewPedanticRegistry. +type Registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. + descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + pedanticChecksEnabled bool +} + +// Register implements Registerer. +func (r *Registry) Register(c Collector) error { + var ( + descChan = make(chan *Desc, capDescChan) + newDescIDs = map[uint64]struct{}{} + newDimHashesByName = map[string]uint64{} + collectorID uint64 // Just a sum of all desc IDs. + duplicateDescErr error + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + r.mtx.Lock() + defer r.mtx.Unlock() + // Conduct various tests... + for desc := range descChan { + + // Is the descriptor valid at all? + if desc.err != nil { + return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + } + + // Is the descID unique? + // (In other words: Is the fqName + constLabel combination unique?) + if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, add it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID += desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. 
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? + if len(newDescIDs) == 0 { + return errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return AlreadyRegisteredError{ + ExistingCollector: existing, + NewCollector: c, + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + metricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + collectors := make(chan Collector, len(r.collectorsByID)) + for _, collector := range r.collectorsByID { + collectors <- collector + } + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. + if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + r.mtx.RUnlock() + + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-collectors: + collector.Collect(metricChan) + wg.Done() + default: + return + } + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close the metricChan once all collectors are collected. + go func() { + wg.Wait() + close(metricChan) + }() + + // Drain metricChan in case of premature return. 
+ defer func() { + for range metricChan { + } + }() + +collectLoop: + for { + select { + case metric, ok := <-metricChan: + if !ok { + // metricChan is closed, we are done. + break collectLoop + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, dimHashes, + registeredDescIDs, + )) + default: + if goroutineBudget <= 0 || len(collectors) == 0 { + // All collectors are already being worked on or + // we have already as many goroutines started as + // there are collectors. Just process metrics + // from now on. + for metric := range metricChan { + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, dimHashes, + registeredDescIDs, + )) + } + break collectLoop + } + // Start more workers. + go collectWorker() + goroutineBudget-- + runtime.Gosched() + } + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + dimHashes map[string]uint64, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? 
+ if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calles are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + errs MultiError // The collected errors to return in the end. + ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. 
However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// normalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. +func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} + +// checkMetricConsistency checks if the provided Metric is consistent with the +// provided MetricFamily. It also hashed the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. The provided dimHashes maps +// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes +// doesn't yet contain a hash for the provided MetricFamily, it is +// added. Otherwise, an error is returned if the existing dimHashes in not equal +// the calculated dimHash. +func checkMetricConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + metricHashes map[uint64]struct{}, + dimHashes map[string]uint64, +) error { + // Type consistency with metric family. + if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s is not a %s", + metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + ) + } + + for _, labelPair := range dtoMetric.GetLabel() { + if !utf8.ValidString(*labelPair.Value) { + return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value) + } + } + + // Is the metric unique (i.e. no other metric with the same name and the same label values)? + h := hashNew() + h = hashAdd(h, metricFamily.GetName()) + h = hashAddByte(h, separatorByte) + dh := hashNew() + // Make sure label pairs are sorted. We depend on it for the consistency + // check. 
+ sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + dh = hashAdd(dh, lp.GetName()) + dh = hashAddByte(dh, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { + if dimHash != dh { + return fmt.Errorf( + "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", + metricFamily.GetName(), dtoMetric, + ) + } + } else { + dimHashes[metricFamily.GetName()] = dh + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/registry_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/registry_test.go new file mode 100644 index 00000000..653df4f7 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/registry_test.go @@ -0,0 +1,590 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package prometheus_test + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +func testHandler(t testing.TB) { + + metricVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + + metricVec.WithLabelValues("val1").Inc() + metricVec.WithLabelValues("val2").Inc() + + externalMetricFamily := &dto.MetricFamily{ + Name: proto.String("externalname"), + Help: proto.String("externaldocstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("externalconstname"), + Value: proto.String("externalconstvalue"), + }, + { + Name: proto.String("externallabelname"), + Value: proto.String("externalval1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + externalBuf := &bytes.Buffer{} + enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim) + if err := enc.Encode(externalMetricFamily); err != nil { + t.Fatal(err) + } + externalMetricFamilyAsBytes := externalBuf.Bytes() + externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring +# TYPE externalname counter +externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1 +`) + externalMetricFamilyAsProtoText := []byte(`name: "externalname" +help: "externaldocstring" +type: COUNTER +metric: < + label: < + name: "externalconstname" + value: "externalconstvalue" + > + label: < + name: "externallabelname" + value: "externalval1" + > + counter: < + value: 1 + > +> + +`) + externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric: label: counter: > +`) + + expectedMetricFamily := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + buf := &bytes.Buffer{} + enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + if err := enc.Encode(expectedMetricFamily); err != nil { + t.Fatal(err) + } + expectedMetricFamilyAsBytes := buf.Bytes() + expectedMetricFamilyAsText := []byte(`# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +`) + expectedMetricFamilyAsProtoText := []byte(`name: "name" +help: "docstring" +type: COUNTER +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val1" + > + counter: < + value: 1 + > +> +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val2" + > + counter: < + value: 1 + > +> + +`) + expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" 
help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > +`) + + externalMetricFamilyWithSameName := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("different_val"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(42), + }, + }, + }, + } + + expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > metric: label: counter: > +`) + + externalMetricFamilyWithInvalidLabelValue := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("\xFF"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("different_val"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(42), + }, + }, + }, + } + + expectedMetricFamilyInvalidLabelValueAsText := []byte(`An error has occurred during metrics gathering: + +collected metric's label constname is not utf8: "\xff" +`) + + type output struct { + headers map[string]string + body []byte + } + + var scenarios = []struct { + headers map[string]string + out output + collector prometheus.Collector + externalMF []*dto.MetricFamily + }{ + { // 0 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, dings/bums;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: []byte{}, + }, + }, + { // 1 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/quark;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: []byte{}, + }, + }, + { // 2 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: []byte{}, + }, + }, + { // 3 + headers: map[string]string{ + "Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: []byte{}, + }, + }, + { // 4 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 5 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: expectedMetricFamilyAsBytes, + }, + collector: metricVec, + }, + { // 6 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; 
charset=utf-8`, + }, + body: externalMetricFamilyAsText, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 7 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: externalMetricFamilyAsBytes, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 8 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 9 + headers: map[string]string{ + "Accept": "text/plain", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: []byte{}, + }, + }, + { // 10 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 11 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsText, + expectedMetricFamilyAsText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 12 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 13 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoText, + expectedMetricFamilyAsProtoText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 14 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": 
`application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 15 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyMergedWithExternalAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{ + externalMetricFamily, + externalMetricFamilyWithSameName, + }, + }, + { // 16 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; charset=utf-8`, + }, + body: expectedMetricFamilyInvalidLabelValueAsText, + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{ + externalMetricFamily, + externalMetricFamilyWithInvalidLabelValue, + }, + }, + } + for i, scenario := range scenarios { + registry := prometheus.NewPedanticRegistry() + gatherer := prometheus.Gatherer(registry) + if scenario.externalMF != nil { + gatherer = prometheus.Gatherers{ + registry, + prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { + return scenario.externalMF, nil + }), + } + } + + if scenario.collector != nil { + registry.Register(scenario.collector) + } + writer := httptest.NewRecorder() + handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})) + request, _ := http.NewRequest("GET", "/", nil) + for key, value := range scenario.headers { + request.Header.Add(key, value) + } + handler(writer, request) + + for key, value := range scenario.out.headers { + if writer.HeaderMap.Get(key) != value { + t.Errorf( + "%d. expected %q for header %q, got %q", + i, value, key, writer.Header().Get(key), + ) + } + } + + if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) { + t.Errorf( + "%d. expected body:\n%s\ngot body:\n%s\n", + i, scenario.out.body, writer.Body.Bytes(), + ) + } + } +} + +func TestHandler(t *testing.T) { + testHandler(t) +} + +func BenchmarkHandler(b *testing.B) { + for i := 0; i < b.N; i++ { + testHandler(b) + } +} + +func TestRegisterWithOrGet(t *testing.T) { + // Replace the default registerer just to be sure. This is bad, but this + // whole test will go away once RegisterOrGet is removed. 
+ oldRegisterer := prometheus.DefaultRegisterer + defer func() { + prometheus.DefaultRegisterer = oldRegisterer + }() + prometheus.DefaultRegisterer = prometheus.NewRegistry() + original := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "test", + Help: "help", + }, + []string{"foo", "bar"}, + ) + equalButNotSame := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "test", + Help: "help", + }, + []string{"foo", "bar"}, + ) + var err error + if err = prometheus.Register(original); err != nil { + t.Fatal(err) + } + if err = prometheus.Register(equalButNotSame); err == nil { + t.Fatal("expected error when registringe equal collector") + } + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + if are.ExistingCollector != original { + t.Error("expected original collector but got something else") + } + if are.ExistingCollector == equalButNotSame { + t.Error("expected original callector but got new one") + } + } else { + t.Error("unexpected error:", err) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/summary.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/summary.go new file mode 100644 index 00000000..f7dc85b9 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/summary.go @@ -0,0 +1,609 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. However, the default behavior will change in the +// upcoming v0.10 of the library. There will be no rank estiamtions at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. 
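	//
	// A minimal sketch of typical Summary usage as recommended above, with
	// Objectives set explicitly; metric and variable names are hypothetical:
	//
	//	reqLatency := NewSummary(SummaryOpts{
	//		Name:       "myapp_request_duration_seconds",
	//		Help:       "Request latency distribution.",
	//		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	//	})
	//	MustRegister(reqLatency)
	//
	//	start := time.Now()
	//	// ... handle one request ...
	//	reqLatency.Observe(time.Since(start).Seconds())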
+ Observe(float64) +} + +// DefObjectives are the default Summary quantile values. +// +// Deprecated: DefObjectives will not be used as the default objectives in +// v0.10 of the library. The default Summary will have no quantiles then. +var ( + DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + + errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, + ) +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name and Help to a non-empty string. While all other fields +// are optional and can safely be left at their zero value, it is recommended to +// explicitly set the Objectives field to the desired value as the default value +// will change in the upcoming v0.10 of the library. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is DefObjectives. It is used if Objectives is left at + // its zero value (i.e. nil). To create a Summary without Objectives, + // set it to an empty map (i.e. map[float64]float64{}). + // + // Deprecated: Note that the current value of DefObjectives is + // deprecated. It will be replaced by an empty map in v0.10 of the + // library. Please explicitly set Objectives to the desired value. + Objectives map[float64]float64 + + // MaxAge defines the duration for which an observation stays relevant + // for the summary. Must be positive. The default value is DefMaxAge. + MaxAge time.Duration + + // AgeBuckets is the number of buckets used to exclude observations that + // are older than MaxAge from the summary. A higher number has a + // resource penalty, so only increase it if the higher resolution is + // really required. For very high observation rates, you might want to + // reduce the number of age buckets. 
With only one age bucket, you will + // effectively see a complete reset of the summary each time MaxAge has + // passed. The default value is DefAgeBuckets. + AgeBuckets uint32 + + // BufCap defines the default sample stream buffer size. The default + // value of DefBufCap should suffice for most uses. If there is a need + // to increase the value, a multiple of 500 is recommended (because that + // is the internal buffer size of the underlying package + // "github.com/bmizerany/perks/quantile"). + BufCap uint32 +} + +// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// perk/quantile is actually not working as advertised - and it might be +// unfixable, as the underlying algorithm is apparently not capable of merging +// summaries in the first place. To avoid using Merge, we are currently adding +// observations to _each_ age bucket, i.e. the effort to add a sample is +// essentially multiplied by the number of age buckets. When rotating age +// buckets, we empty the previous head stream. On scrape time, we simply take +// the quantiles from the head stream (no merging required). Result: More effort +// on observation time, less effort on scrape time, which is exactly the +// opposite of what we try to accomplish, but at least the results are correct. +// +// The quite elegant previous contraption to merge the age buckets efficiently +// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) +// can't be used anymore. + +// NewSummary creates a new Summary based on the provided SummaryOpts. +func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if opts.Objectives == nil { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. 
+ + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. + s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. +func (s *summary) asyncFlush(now time.Time) { + s.mtx.Lock() + s.swapBufs(now) + + // Unblock the original goroutine that was responsible for the mutation + // that triggered the compaction. But hold onto the global non-buffer + // state mutex until the operation finishes. + go func() { + s.flushColdBuf() + s.mtx.Unlock() + }() +} + +// rotateStreams needs mtx AND bufMtx locked. +func (s *summary) maybeRotateStreams() { + for !s.hotBufExpTime.Equal(s.headStreamExpTime) { + s.headStream.Reset() + s.headStreamIdx++ + if s.headStreamIdx >= len(s.streams) { + s.headStreamIdx = 0 + } + s.headStream = s.streams[s.headStreamIdx] + s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) + } +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { + for _, v := range s.coldBuf { + for _, stream := range s.streams { + stream.Insert(v) + } + s.cnt++ + s.sum += v + } + s.coldBuf = s.coldBuf[0:0] + s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { + if len(s.coldBuf) != 0 { + panic("coldBuf is not empty") + } + s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf + // hotBuf is now empty and gets new expiration set. + for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. 
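	//
	// A minimal sketch of the partitioning described above; metric and label
	// names are hypothetical:
	//
	//	latencies := NewSummaryVec(
	//		SummaryOpts{
	//			Name:       "myapp_request_duration_seconds",
	//			Help:       "Request latency, partitioned by method and status code.",
	//			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	//		},
	//		[]string{"method", "code"},
	//	)
	//	MustRegister(latencies)
	//	latencies.WithLabelValues("GET", "200").Observe(0.21)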
+type SummaryVec struct { + *metricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Summary for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Summary is created. Implications of +// creating a Summary without using it and keeping the Summary for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { + s, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return s +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *SummaryVec) With(labels Labels) Observer { + s, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return s +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the SummaryVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &SummaryVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. 
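	//
	// A minimal sketch of the throw-away usage described above, inside the
	// Collect method of a hypothetical custom Collector (the descriptor field
	// and the numbers are made up):
	//
	//	func (c *myCollector) Collect(ch chan<- Metric) {
	//		ch <- MustNewConstSummary(
	//			c.desc, // a *Desc created once with NewDesc
	//			4711,   // observation count
	//			403.42, // sum of observations
	//			map[float64]float64{0.5: 0.23, 0.99: 0.56},
	//		)
	//	}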
+func NewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstSummary is a version of NewConstSummary that panics where +// NewConstMetric would have returned an error. +func MustNewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) Metric { + m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/summary_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/summary_test.go new file mode 100644 index 00000000..b162ed94 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/summary_test.go @@ -0,0 +1,388 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "math/rand" + "sort" + "sync" + "testing" + "testing/quick" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func TestSummaryWithDefaultObjectives(t *testing.T) { + reg := NewRegistry() + summaryWithDefaultObjectives := NewSummary(SummaryOpts{ + Name: "default_objectives", + Help: "Test help.", + }) + if err := reg.Register(summaryWithDefaultObjectives); err != nil { + t.Error(err) + } + + m := &dto.Metric{} + if err := summaryWithDefaultObjectives.Write(m); err != nil { + t.Error(err) + } + if len(m.GetSummary().Quantile) != len(DefObjectives) { + t.Error("expected default objectives in summary") + } +} + +func TestSummaryWithoutObjectives(t *testing.T) { + reg := NewRegistry() + summaryWithEmptyObjectives := NewSummary(SummaryOpts{ + Name: "empty_objectives", + Help: "Test help.", + Objectives: map[float64]float64{}, + }) + if err := reg.Register(summaryWithEmptyObjectives); err != nil { + t.Error(err) + } + + m := &dto.Metric{} + if err := summaryWithEmptyObjectives.Write(m); err != nil { + t.Error(err) + } + if len(m.GetSummary().Quantile) != 0 { + t.Error("expected no objectives in summary") + } +} + +func benchmarkSummaryObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryObserve1(b *testing.B) { + benchmarkSummaryObserve(1, b) +} + +func BenchmarkSummaryObserve2(b *testing.B) { + benchmarkSummaryObserve(2, b) +} + +func BenchmarkSummaryObserve4(b *testing.B) { + benchmarkSummaryObserve(4, b) +} + +func BenchmarkSummaryObserve8(b *testing.B) { + benchmarkSummaryObserve(8, b) +} + +func benchmarkSummaryWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryWrite1(b *testing.B) { + benchmarkSummaryWrite(1, b) +} + +func BenchmarkSummaryWrite2(b *testing.B) { + benchmarkSummaryWrite(2, b) +} + +func BenchmarkSummaryWrite4(b *testing.B) { + benchmarkSummaryWrite(4, b) +} + +func BenchmarkSummaryWrite8(b *testing.B) { + benchmarkSummaryWrite(8, b) +} + +func TestSummaryConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + 
sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Summary.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + for i, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[i].Quantile + gotV := *m.Summary.Quantile[i].Value + min, max := getBounds(allVars, wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f, want %f", gotQ, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummaryVec( + SummaryOpts{ + Name: "test_summary", + Help: "helpless", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sum.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := sum.WithLabelValues(string('A' + i)) + s.(Summary).Write(m) + if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) + } + if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want) + } + for j, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[j].Quantile + gotV := *m.Summary.Quantile[j].Value + min, max := getBounds(allVars[i], wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryDecay(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + // More because it depends on timing than because it is particularly long... 
+ } + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + MaxAge: 100 * time.Millisecond, + Objectives: map[float64]float64{0.1: 0.001}, + AgeBuckets: 10, + }) + + m := &dto.Metric{} + i := 0 + tick := time.NewTicker(time.Millisecond) + for range tick.C { + i++ + sum.Observe(float64(i)) + if i%10 == 0 { + sum.Write(m) + if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 { + t.Errorf("%d. got %f, want %f", i, got, want) + } + m.Reset() + } + if i >= 1000 { + break + } + } + tick.Stop() + // Wait for MaxAge without observations and make sure quantiles are NaN. + time.Sleep(100 * time.Millisecond) + sum.Write(m) + if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) { + t.Errorf("got %f, want NaN after expiration", got) + } +} + +func getBounds(vars []float64, q, ε float64) (min, max float64) { + // TODO(beorn7): This currently tolerates an error of up to 2*ε. The + // error must be at most ε, but for some reason, it's sometimes slightly + // higher. That's a bug. + n := float64(len(vars)) + lower := int((q - 2*ε) * n) + upper := int(math.Ceil((q + 2*ε) * n)) + min = vars[0] + if lower > 1 { + min = vars[lower-1] + } + max = vars[len(vars)-1] + if upper < len(vars) { + max = vars[upper-1] + } + return +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/timer.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/timer.go new file mode 100644 index 00000000..b8fc5f18 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/timer.go @@ -0,0 +1,51 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. ObserveDuration is +// usually called with a defer statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. 
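+//
+// ObserveDuration is a no-op if the Timer was created with a nil Observer
+// (see the nil check below).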
+func (t *Timer) ObserveDuration() { + if t.observer != nil { + t.observer.Observe(time.Since(t.begin).Seconds()) + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/timer_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/timer_test.go new file mode 100644 index 00000000..29490206 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/timer_test.go @@ -0,0 +1,152 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestTimerObserve(t *testing.T) { + var ( + his = NewHistogram(HistogramOpts{Name: "test_histogram"}) + sum = NewSummary(SummaryOpts{Name: "test_summary"}) + gauge = NewGauge(GaugeOpts{Name: "test_gauge"}) + ) + + func() { + hisTimer := NewTimer(his) + sumTimer := NewTimer(sum) + gaugeTimer := NewTimer(ObserverFunc(gauge.Set)) + defer hisTimer.ObserveDuration() + defer sumTimer.ObserveDuration() + defer gaugeTimer.ObserveDuration() + }() + + m := &dto.Metric{} + his.Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for histogram, got %d", want, got) + } + m.Reset() + sum.Write(m) + if want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got { + t.Errorf("want %d observations for summary, got %d", want, got) + } + m.Reset() + gauge.Write(m) + if got := m.GetGauge().GetValue(); got <= 0 { + t.Errorf("want value > 0 for gauge, got %f", got) + } +} + +func TestTimerEmpty(t *testing.T) { + emptyTimer := NewTimer(nil) + emptyTimer.ObserveDuration() + // Do nothing, just demonstrate it works without panic. +} + +func TestTimerConditionalTiming(t *testing.T) { + var ( + his = NewHistogram(HistogramOpts{ + Name: "test_histogram", + }) + timeMe = true + m = &dto.Metric{} + ) + + timedFunc := func() { + timer := NewTimer(ObserverFunc(func(v float64) { + if timeMe { + his.Observe(v) + } + })) + defer timer.ObserveDuration() + } + + timedFunc() // This will time. + his.Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for histogram, got %d", want, got) + } + + timeMe = false + timedFunc() // This will not time again. 
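+	// The sample count must still be 1: with timeMe set to false, the
+	// ObserverFunc above skipped the second observation.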
+ m.Reset() + his.Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for histogram, got %d", want, got) + } +} + +func TestTimerByOutcome(t *testing.T) { + var ( + his = NewHistogramVec( + HistogramOpts{Name: "test_histogram"}, + []string{"outcome"}, + ) + outcome = "foo" + m = &dto.Metric{} + ) + + timedFunc := func() { + timer := NewTimer(ObserverFunc(func(v float64) { + his.WithLabelValues(outcome).Observe(v) + })) + defer timer.ObserveDuration() + + if outcome == "foo" { + outcome = "bar" + return + } + outcome = "foo" + } + + timedFunc() + his.WithLabelValues("foo").(Histogram).Write(m) + if want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) + } + m.Reset() + his.WithLabelValues("bar").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) + } + + timedFunc() + m.Reset() + his.WithLabelValues("foo").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) + } + m.Reset() + his.WithLabelValues("bar").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) + } + + timedFunc() + m.Reset() + his.WithLabelValues("foo").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) + } + m.Reset() + his.WithLabelValues("bar").(Histogram).Write(m) + if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) + } + +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/untyped.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/untyped.go new file mode 100644 index 00000000..0f9ce63f --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. 
Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/value.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/value.go new file mode 100644 index 00000000..9fb7eab0 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/value.go @@ -0,0 +1,158 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. 
NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + labelPairs = append(labelPairs, desc.constLabelPairs...) 
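+	// Sort the combined variable and constant label pairs by label name
+	// (LabelPairSorter orders pairs by Name).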
+ sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/value_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/value_test.go new file mode 100644 index 00000000..eed517e7 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/value_test.go @@ -0,0 +1,43 @@ +package prometheus + +import ( + "fmt" + "testing" +) + +func TestNewConstMetricInvalidLabelValues(t *testing.T) { + testCases := []struct { + desc string + labels Labels + }{ + { + desc: "non utf8 label value", + labels: Labels{"a": "\xFF"}, + }, + { + desc: "not enough label values", + labels: Labels{}, + }, + { + desc: "too many label values", + labels: Labels{"a": "1", "b": "2"}, + }, + } + + for _, test := range testCases { + metricDesc := NewDesc( + "sample_value", + "sample value", + []string{"a"}, + Labels{}, + ) + + expectPanic(t, func() { + MustNewConstMetric(metricDesc, CounterValue, 0.3, "\xFF") + }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) + + if _, err := NewConstMetric(metricDesc, CounterValue, 0.3, "\xFF"); err == nil { + t.Errorf("NewConstMetric: expected error because: %s", test.desc) + } + } +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/vec.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/vec.go new file mode 100644 index 00000000..14ed9e85 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/vec.go @@ -0,0 +1,472 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. +// It also handles label currying. It uses basicMetricVec internally. +type metricVec struct { + *metricMap + + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. + hashAdd func(h uint64, s string) uint64 + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized metricVec. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { + return &metricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. 
+// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. 
+ } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. 
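+// It returns true if a metric was deleted; the write lock is acquired inside.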
+func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + + i := findMetricWithLabelValues(metrics, lvs, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + i := findMetricWithLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) + if !ok { + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) + if !ok { + lvs := extractLabelValues(m.desc, labels, curry) + metric = m.newMetric(lvs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. +func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithHashAndLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. 
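+// It returns the matching metric and true, or nil and false if no metric with
+// those labels exists in the bucket.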
+func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/vec_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/vec_test.go new file mode 100644 index 00000000..bd18a9f4 --- /dev/null +++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/prometheus/vec_test.go @@ -0,0 +1,535 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestDelete(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testDelete(t, vec) +} + +func TestDeleteWithCollisions(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testDelete(t, vec) +} + +func testDelete(t *testing.T, vec *GaugeVec) { + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestDeleteLabelValues(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testDeleteLabelValues(t, vec) +} + +func TestDeleteLabelValuesWithCollisions(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testDeleteLabelValues(t, vec) +} + +func testDeleteLabelValues(t *testing.T, vec *GaugeVec) { + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + vec.With(Labels{"l1": "v1", "l2": "v3"}).(Gauge).Set(42) // Add junk data for collision. + if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + // Delete out of order. 
+ if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestMetricVec(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testMetricVec(t, vec) +} + +func TestMetricVecWithCollisions(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testMetricVec(t, vec) +} + +func testMetricVec(t *testing.T, vec *GaugeVec) { + vec.Reset() // Actually test Reset now! + + var pair [2]string + // Keep track of metrics. + expected := map[[2]string]int{} + + for i := 0; i < 1000; i++ { + pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples. + expected[pair]++ + vec.WithLabelValues(pair[0], pair[1]).Inc() + + expected[[2]string{"v1", "v2"}]++ + vec.WithLabelValues("v1", "v2").(Gauge).Inc() + } + + var total int + for _, metrics := range vec.metricMap.metrics { + for _, metric := range metrics { + total++ + copy(pair[:], metric.values) + + var metricOut dto.Metric + if err := metric.metric.Write(&metricOut); err != nil { + t.Fatal(err) + } + actual := *metricOut.Gauge.Value + + var actualPair [2]string + for i, label := range metricOut.Label { + actualPair[i] = *label.Value + } + + // Test output pair against metric.values to ensure we've selected + // the right one. We check this to ensure the below check means + // anything at all. + if actualPair != pair { + t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair) + } + + if actual != float64(expected[pair]) { + t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair]) + } + } + } + + if total != len(expected) { + t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected)) + } + + vec.Reset() + + if len(vec.metricMap.metrics) > 0 { + t.Fatalf("reset failed") + } +} + +func TestCounterVecEndToEndWithCollision(t *testing.T) { + vec := NewCounterVec( + CounterOpts{ + Name: "test", + Help: "helpless", + }, + []string{"labelname"}, + ) + vec.WithLabelValues("77kepQFQ8Kl").Inc() + vec.WithLabelValues("!0IC=VloaY").Add(2) + + m := &dto.Metric{} + if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil { + t.Fatal(err) + } + if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want { + t.Errorf("got label value %q, want %q", got, want) + } + if got, want := m.GetCounter().GetValue(), 1.; got != want { + t.Errorf("got value %f, want %f", got, want) + } + m.Reset() + if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil { + t.Fatal(err) + } + if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want { + t.Errorf("got label value %q, want %q", got, want) + } + if got, want := m.GetCounter().GetValue(), 2.; got != want { + t.Errorf("got value %f, want %f", got, want) + } +} + +func TestCurryVec(t *testing.T) { + vec := NewCounterVec( + CounterOpts{ + Name: "test", + Help: "helpless", + }, + []string{"one", "two", "three"}, + ) + testCurryVec(t, vec) +} + +func TestCurryVecWithCollisions(t *testing.T) { + vec := NewCounterVec( + CounterOpts{ + Name: "test", + Help: "helpless", + }, + []string{"one", "two", "three"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + 
vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testCurryVec(t, vec) +} + +func testCurryVec(t *testing.T, vec *CounterVec) { + + assertMetrics := func(t *testing.T) { + n := 0 + for _, m := range vec.metricMap.metrics { + n += len(m) + } + if n != 2 { + t.Error("expected two metrics, got", n) + } + m := &dto.Metric{} + c1, err := vec.GetMetricWithLabelValues("1", "2", "3") + if err != nil { + t.Fatal("unexpected error getting metric:", err) + } + c1.Write(m) + if want, got := 1., m.GetCounter().GetValue(); want != got { + t.Errorf("want %f as counter value, got %f", want, got) + } + m.Reset() + c2, err := vec.GetMetricWithLabelValues("11", "22", "33") + if err != nil { + t.Fatal("unexpected error getting metric:", err) + } + c2.Write(m) + if want, got := 1., m.GetCounter().GetValue(); want != got { + t.Errorf("want %f as counter value, got %f", want, got) + } + } + + assertNoMetric := func(t *testing.T) { + if n := len(vec.metricMap.metrics); n != 0 { + t.Error("expected no metrics, got", n) + } + } + + t.Run("zero labels", func(t *testing.T) { + c1 := vec.MustCurryWith(nil) + c2 := vec.MustCurryWith(nil) + c1.WithLabelValues("1", "2", "3").Inc() + c2.With(Labels{"one": "11", "two": "22", "three": "33"}).Inc() + assertMetrics(t) + if !c1.Delete(Labels{"one": "1", "two": "2", "three": "3"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("11", "22", "33") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("first label", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"one": "1"}) + c2 := vec.MustCurryWith(Labels{"one": "11"}) + c1.WithLabelValues("2", "3").Inc() + c2.With(Labels{"two": "22", "three": "33"}).Inc() + assertMetrics(t) + if c1.Delete(Labels{"two": "22", "three": "33"}) { + t.Error("deletion unexpectedly succeeded") + } + if c2.DeleteLabelValues("2", "3") { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"two": "2", "three": "3"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("22", "33") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("middle label", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"two": "2"}) + c2 := vec.MustCurryWith(Labels{"two": "22"}) + c1.WithLabelValues("1", "3").Inc() + c2.With(Labels{"one": "11", "three": "33"}).Inc() + assertMetrics(t) + if c1.Delete(Labels{"one": "11", "three": "33"}) { + t.Error("deletion unexpectedly succeeded") + } + if c2.DeleteLabelValues("1", "3") { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"one": "1", "three": "3"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("11", "33") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("last label", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"three": "3"}) + c2 := vec.MustCurryWith(Labels{"three": "33"}) + c1.WithLabelValues("1", "2").Inc() + c2.With(Labels{"one": "11", "two": "22"}).Inc() + assertMetrics(t) + if c1.Delete(Labels{"two": "22", "one": "11"}) { + t.Error("deletion unexpectedly succeeded") + } + if c2.DeleteLabelValues("1", "2") { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"two": "2", "one": "1"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("11", "22") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("two labels", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"three": "3", "one": "1"}) + c2 := vec.MustCurryWith(Labels{"three": "33", "one": "11"}) + c1.WithLabelValues("2").Inc() + c2.With(Labels{"two": "22"}).Inc() + 
assertMetrics(t) + if c1.Delete(Labels{"two": "22"}) { + t.Error("deletion unexpectedly succeeded") + } + if c2.DeleteLabelValues("2") { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"two": "2"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("22") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("all labels", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"three": "3", "two": "2", "one": "1"}) + c2 := vec.MustCurryWith(Labels{"three": "33", "one": "11", "two": "22"}) + c1.WithLabelValues().Inc() + c2.With(nil).Inc() + assertMetrics(t) + if !c1.Delete(Labels{}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues() { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("double curry", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"three": "3"}).MustCurryWith(Labels{"one": "1"}) + c2 := vec.MustCurryWith(Labels{"three": "33"}).MustCurryWith(Labels{"one": "11"}) + c1.WithLabelValues("2").Inc() + c2.With(Labels{"two": "22"}).Inc() + assertMetrics(t) + if c1.Delete(Labels{"two": "22"}) { + t.Error("deletion unexpectedly succeeded") + } + if c2.DeleteLabelValues("2") { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"two": "2"}) { + t.Error("deletion failed") + } + if !c2.DeleteLabelValues("22") { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("use already curried label", func(t *testing.T) { + c1 := vec.MustCurryWith(Labels{"three": "3"}) + if _, err := c1.GetMetricWithLabelValues("1", "2", "3"); err == nil { + t.Error("expected error when using already curried label") + } + if _, err := c1.GetMetricWith(Labels{"one": "1", "two": "2", "three": "3"}); err == nil { + t.Error("expected error when using already curried label") + } + assertNoMetric(t) + c1.WithLabelValues("1", "2").Inc() + if c1.Delete(Labels{"one": "1", "two": "2", "three": "3"}) { + t.Error("deletion unexpectedly succeeded") + } + if !c1.Delete(Labels{"one": "1", "two": "2"}) { + t.Error("deletion failed") + } + assertNoMetric(t) + }) + t.Run("curry already curried label", func(t *testing.T) { + if _, err := vec.MustCurryWith(Labels{"three": "3"}).CurryWith(Labels{"three": "33"}); err == nil { + t.Error("currying unexpectedly succeeded") + } else if err.Error() != `label name "three" is already curried` { + t.Error("currying returned unexpected error:", err) + } + + }) + t.Run("unknown label", func(t *testing.T) { + if _, err := vec.CurryWith(Labels{"foo": "bar"}); err == nil { + t.Error("currying unexpectedly succeeded") + } else if err.Error() != "1 unknown label(s) found during currying" { + t.Error("currying returned unexpected error:", err) + } + }) +} + +func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) { + benchmarkMetricVecWithLabelValues(b, map[string][]string{ + "l1": {"onevalue"}, + "l2": {"twovalue"}, + }) +} + +func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10) +} + +func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10) +} + +func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100) +} + +func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100) +} + +func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) { + 
benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000) +} + +func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) { + labels := map[string][]string{} + + for i := 0; i < nkeys; i++ { + var ( + k = fmt.Sprintf("key-%v", i) + vs = make([]string, 0, nvalues) + ) + for j := 0; j < nvalues; j++ { + vs = append(vs, fmt.Sprintf("value-%v", j)) + } + labels[k] = vs + } + + benchmarkMetricVecWithLabelValues(b, labels) +} + +func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) { + var keys []string + for k := range labels { // Map order dependent, who cares though. + keys = append(keys, k) + } + + values := make([]string, len(labels)) // Value cache for permutations. + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + keys, + ) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + // Varies input across provide map entries based on key size. + for j, k := range keys { + candidates := labels[k] + values[j] = candidates[i%len(candidates)] + } + + vec.WithLabelValues(values...) + } +}