blob: d86d0cf4b0e98a799f24f84f64b7dbfe20390dc6 [file] [log] [blame]
khenaidooab1f7bd2019-11-14 14:00:27 -05001// Copyright 2016 The Prometheus Authors
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14// Package promhttp provides tooling around HTTP servers and clients.
15//
16// First, the package allows the creation of http.Handler instances to expose
17// Prometheus metrics via HTTP. promhttp.Handler acts on the
18// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
19// custom registry or anything that implements the Gatherer interface. It also
20// allows the creation of handlers that act differently on errors or allow to
21// log errors.
22//
23// Second, the package provides tooling to instrument instances of http.Handler
24// via middleware. Middleware wrappers follow the naming scheme
25// InstrumentHandlerX, where X describes the intended use of the middleware.
26// See each function's doc comment for specific details.
27//
28// Finally, the package allows for an http.RoundTripper to be instrumented via
29// middleware. Middleware wrappers follow the naming scheme
30// InstrumentRoundTripperX, where X describes the intended use of the
31// middleware. See each function's doc comment for specific details.
32package promhttp
33
34import (
35 "compress/gzip"
36 "fmt"
37 "io"
38 "net/http"
39 "strings"
40 "sync"
41 "time"
42
43 "github.com/prometheus/common/expfmt"
44
45 "github.com/prometheus/client_golang/prometheus"
46)
47
// HTTP header names used when serving and negotiating metrics responses.
const (
	contentTypeHeader     = "Content-Type"
	contentEncodingHeader = "Content-Encoding"
	acceptEncodingHeader  = "Accept-Encoding"
)
53
// gzipPool recycles gzip.Writers across scrapes to avoid allocating a new
// compressor (and its internal buffers) for every compressed response.
// Writers taken from the pool must be Reset against the target writer
// before use.
var gzipPool = sync.Pool{
	New: func() interface{} {
		return gzip.NewWriter(nil)
	},
}
59
60// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
61// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
62// no error logging, and it applies compression if requested by the client.
63//
64// The returned http.Handler is already instrumented using the
65// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
66// create multiple http.Handlers by separate calls of the Handler function, the
67// metrics used for instrumentation will be shared between them, providing
68// global scrape counts.
69//
70// This function is meant to cover the bulk of basic use cases. If you are doing
71// anything that requires more customization (including using a non-default
72// Gatherer, different instrumentation, and non-default HandlerOpts), use the
73// HandlerFor function. See there for details.
74func Handler() http.Handler {
75 return InstrumentMetricHandler(
76 prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
77 )
78}
79
// HandlerFor returns an uninstrumented http.Handler for the provided
// Gatherer. The behavior of the Handler is defined by the provided
// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
// instrumentation. Use the InstrumentMetricHandler function to apply the same
// kind of instrumentation as it is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
	// inFlightSem, if non-nil, acts as a counting semaphore bounding the
	// number of concurrent scrapes. errCnt counts internal handler errors,
	// partitioned by cause ("gathering" or "encoding"); it is only exposed
	// if opts.Registry is set below.
	var (
		inFlightSem chan struct{}
		errCnt      = prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "promhttp_metric_handler_errors_total",
				Help: "Total number of internal errors encountered by the promhttp metric handler.",
			},
			[]string{"cause"},
		)
	)

	if opts.MaxRequestsInFlight > 0 {
		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
	}
	if opts.Registry != nil {
		// Initialize all possibilities that can occur below, so the error
		// counter is exposed with a value of 0 for each cause up front.
		errCnt.WithLabelValues("gathering")
		errCnt.WithLabelValues("encoding")
		if err := opts.Registry.Register(errCnt); err != nil {
			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
				// Another HandlerFor call already registered the counter;
				// share the existing collector instead of failing.
				errCnt = are.ExistingCollector.(*prometheus.CounterVec)
			} else {
				panic(err)
			}
		}
	}

	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
		if inFlightSem != nil {
			select {
			case inFlightSem <- struct{}{}: // All good, carry on.
				defer func() { <-inFlightSem }()
			default:
				// Semaphore is full: reject this scrape with 503 rather
				// than queueing unbounded concurrent gathering work.
				http.Error(rsp, fmt.Sprintf(
					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
				), http.StatusServiceUnavailable)
				return
			}
		}
		mfs, err := reg.Gather()
		if err != nil {
			if opts.ErrorLog != nil {
				opts.ErrorLog.Println("error gathering metrics:", err)
			}
			errCnt.WithLabelValues("gathering").Inc()
			switch opts.ErrorHandling {
			case PanicOnError:
				panic(err)
			case ContinueOnError:
				if len(mfs) == 0 {
					// Still report the error if no metrics have been gathered.
					httpError(rsp, err)
					return
				}
				// Otherwise fall through and serve the partial result.
			case HTTPErrorOnError:
				httpError(rsp, err)
				return
			}
		}

		// Negotiate the wire format from the request's Accept header.
		// OpenMetrics is only offered as an option if explicitly enabled.
		var contentType expfmt.Format
		if opts.EnableOpenMetrics {
			contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
		} else {
			contentType = expfmt.Negotiate(req.Header)
		}
		header := rsp.Header()
		header.Set(contentTypeHeader, string(contentType))

		w := io.Writer(rsp)
		if !opts.DisableCompression && gzipAccepted(req.Header) {
			header.Set(contentEncodingHeader, "gzip")
			// Reuse a pooled gzip.Writer to avoid a per-request allocation.
			// Put must run after Close, hence the defer ordering (LIFO).
			gz := gzipPool.Get().(*gzip.Writer)
			defer gzipPool.Put(gz)

			gz.Reset(w)
			defer gz.Close()

			w = gz
		}

		enc := expfmt.NewEncoder(w, contentType)

		// handleError handles the error according to opts.ErrorHandling
		// and returns true if we have to abort after the handling.
		handleError := func(err error) bool {
			if err == nil {
				return false
			}
			if opts.ErrorLog != nil {
				opts.ErrorLog.Println("error encoding and sending metric family:", err)
			}
			errCnt.WithLabelValues("encoding").Inc()
			switch opts.ErrorHandling {
			case PanicOnError:
				panic(err)
			case HTTPErrorOnError:
				// We cannot really send an HTTP error at this
				// point because we most likely have written
				// something to rsp already. But at least we can
				// stop sending.
				return true
			}
			// Do nothing in all other cases, including ContinueOnError.
			return false
		}

		for _, mf := range mfs {
			if handleError(enc.Encode(mf)) {
				return
			}
		}
		if closer, ok := enc.(expfmt.Closer); ok {
			// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
			if handleError(closer.Close()) {
				return
			}
		}
	})

	if opts.Timeout <= 0 {
		return h
	}
	// Wrap in a TimeoutHandler so overlong requests are answered with 503.
	// See the caveats documented on HandlerOpts.Timeout: gathering keeps
	// running in the background even after the timeout fires.
	return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
		"Exceeded configured timeout of %v.\n",
		opts.Timeout,
	))
}
215
216// InstrumentMetricHandler is usually used with an http.Handler returned by the
217// HandlerFor function. It instruments the provided http.Handler with two
218// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
219// scrapes partitioned by HTTP status code, and a gauge
220// "promhttp_metric_handler_requests_in_flight" to track the number of
221// simultaneous scrapes. This function idempotently registers collectors for
222// both metrics with the provided Registerer. It panics if the registration
223// fails. The provided metrics are useful to see how many scrapes hit the
224// monitored target (which could be from different Prometheus servers or other
225// scrapers), and how often they overlap (which would result in more than one
226// scrape in flight at the same time). Note that the scrapes-in-flight gauge
227// will contain the scrape by which it is exposed, while the scrape counter will
228// only get incremented after the scrape is complete (as only then the status
229// code is known). For tracking scrape durations, use the
230// "scrape_duration_seconds" gauge created by the Prometheus server upon each
231// scrape.
232func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
233 cnt := prometheus.NewCounterVec(
234 prometheus.CounterOpts{
235 Name: "promhttp_metric_handler_requests_total",
236 Help: "Total number of scrapes by HTTP status code.",
237 },
238 []string{"code"},
239 )
240 // Initialize the most likely HTTP status codes.
241 cnt.WithLabelValues("200")
242 cnt.WithLabelValues("500")
243 cnt.WithLabelValues("503")
244 if err := reg.Register(cnt); err != nil {
245 if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
246 cnt = are.ExistingCollector.(*prometheus.CounterVec)
247 } else {
248 panic(err)
249 }
250 }
251
252 gge := prometheus.NewGauge(prometheus.GaugeOpts{
253 Name: "promhttp_metric_handler_requests_in_flight",
254 Help: "Current number of scrapes being served.",
255 })
256 if err := reg.Register(gge); err != nil {
257 if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
258 gge = are.ExistingCollector.(prometheus.Gauge)
259 } else {
260 panic(err)
261 }
262 }
263
264 return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
265}
266
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors. See the constants below for the available policies.
type HandlerErrorHandling int

// These constants cause handlers serving metrics to behave as described if
// errors are encountered. The zero value is HTTPErrorOnError.
const (
	// Serve an HTTP status code 500 upon the first error
	// encountered. Report the error message in the body. Note that HTTP
	// errors cannot be served anymore once the beginning of a regular
	// payload has been sent. Thus, in the (unlikely) case that encoding the
	// payload into the negotiated wire format fails, serving the response
	// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
	// those errors.
	HTTPErrorOnError HandlerErrorHandling = iota
	// Ignore errors and try to serve as many metrics as possible. However,
	// if no metrics can be served, serve an HTTP status code 500 and the
	// last error message in the body. Only use this in deliberate "best
	// effort" metrics collection scenarios. In this case, it is highly
	// recommended to provide other means of detecting errors: By setting an
	// ErrorLog in HandlerOpts, the errors are logged. By providing a
	// Registry in HandlerOpts, the exposed metrics include an error counter
	// "promhttp_metric_handler_errors_total", which can be used for
	// alerts.
	ContinueOnError
	// Panic upon the first error encountered (useful for "crash only" apps).
	PanicOnError
)
295
// Logger is the minimal interface HandlerOpts needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy to implement by custom loggers, if they don't do so already anyway.
type Logger interface {
	// Println logs its arguments in the manner of fmt.Println.
	Println(v ...interface{})
}
302
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
	// ErrorLog specifies an optional Logger for errors collecting and
	// serving metrics. If nil, errors are not logged at all. Note that the
	// type of a reported error is often prometheus.MultiError, which
	// formats into a multi-line error string. If you want to avoid the
	// latter, create a Logger implementation that detects a
	// prometheus.MultiError and formats the contained errors into one line.
	ErrorLog Logger
	// ErrorHandling defines how errors are handled. Note that errors are
	// logged regardless of the configured ErrorHandling provided ErrorLog
	// is not nil. The zero value is HTTPErrorOnError.
	ErrorHandling HandlerErrorHandling
	// If Registry is not nil, it is used to register a metric
	// "promhttp_metric_handler_errors_total", partitioned by "cause". A
	// failed registration causes a panic. Note that this error counter is
	// different from the instrumentation you get from the various
	// InstrumentHandler... helpers. It counts errors that don't necessarily
	// result in a non-2xx HTTP status code. There are two typical cases:
	// (1) Encoding errors that only happen after streaming of the HTTP body
	// has already started (and the status code 200 has been sent). This
	// should only happen with custom collectors. (2) Collection errors with
	// no effect on the HTTP status code because ErrorHandling is set to
	// ContinueOnError.
	Registry prometheus.Registerer
	// If DisableCompression is true, the handler will never compress the
	// response, even if requested by the client.
	DisableCompression bool
	// The number of concurrent HTTP requests is limited to
	// MaxRequestsInFlight. Additional requests are responded to with 503
	// Service Unavailable and a suitable message in the body. If
	// MaxRequestsInFlight is 0 or negative, no limit is applied.
	MaxRequestsInFlight int
	// If handling a request takes longer than Timeout, it is responded to
	// with 503 ServiceUnavailable and a suitable Message. No timeout is
	// applied if Timeout is 0 or negative. Note that with the current
	// implementation, reaching the timeout simply ends the HTTP requests as
	// described above (and even that only if sending of the body hasn't
	// started yet), while the bulk work of gathering all the metrics keeps
	// running in the background (with the eventual result to be thrown
	// away). Until the implementation is improved, it is recommended to
	// implement a separate timeout in potentially slow Collectors.
	Timeout time.Duration
	// If true, the experimental OpenMetrics encoding is added to the
	// possible options during content negotiation. Note that Prometheus
	// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
	// the only way to transmit exemplars. However, the move to OpenMetrics
	// is not completely transparent. Most notably, the values of "quantile"
	// labels of Summaries and "le" labels of Histograms are formatted with
	// a trailing ".0" if they would otherwise look like integer numbers
	// (which changes the identity of the resulting series on the Prometheus
	// server).
	EnableOpenMetrics bool
}
358
359// gzipAccepted returns whether the client will accept gzip-encoded content.
360func gzipAccepted(header http.Header) bool {
361 a := header.Get(acceptEncodingHeader)
362 parts := strings.Split(a, ",")
363 for _, part := range parts {
364 part = strings.TrimSpace(part)
365 if part == "gzip" || strings.HasPrefix(part, "gzip;") {
366 return true
367 }
368 }
369 return false
370}
371
372// httpError removes any content-encoding header and then calls http.Error with
khenaidood948f772021-08-11 17:49:24 -0400373// the provided error and http.StatusInternalServerError. Error contents is
374// supposed to be uncompressed plain text. Same as with a plain http.Error, this
375// must not be called if the header or any payload has already been sent.
khenaidooab1f7bd2019-11-14 14:00:27 -0500376func httpError(rsp http.ResponseWriter, err error) {
377 rsp.Header().Del(contentEncodingHeader)
378 http.Error(
379 rsp,
380 "An error has occurred while serving metrics:\n\n"+err.Error(),
381 http.StatusInternalServerError,
382 )
383}