2019-06-30 17:07:58 -05:00
|
|
|
// Copyright 2015 Matthew Holt and The Caddy Authors
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2019-05-20 11:59:20 -05:00
|
|
|
package caddyhttp
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2020-03-15 22:26:17 -05:00
|
|
|
"encoding/json"
|
2022-03-11 14:34:55 -05:00
|
|
|
"errors"
|
2019-05-20 11:59:20 -05:00
|
|
|
"fmt"
|
2019-05-22 15:14:26 -05:00
|
|
|
"net"
|
2019-05-20 11:59:20 -05:00
|
|
|
"net/http"
|
2019-09-05 14:36:42 -05:00
|
|
|
"net/url"
|
2020-04-26 23:28:49 -05:00
|
|
|
"runtime"
|
2019-06-26 17:03:29 -05:00
|
|
|
"strings"
|
2019-10-28 15:39:37 -05:00
|
|
|
"time"
|
2019-05-20 11:59:20 -05:00
|
|
|
|
2019-07-02 13:37:06 -05:00
|
|
|
"github.com/caddyserver/caddy/v2"
|
|
|
|
"github.com/caddyserver/caddy/v2/modules/caddytls"
|
2022-03-04 15:44:59 -05:00
|
|
|
"github.com/caddyserver/certmagic"
|
2019-09-11 19:49:21 -05:00
|
|
|
"github.com/lucas-clemente/quic-go/http3"
|
2019-10-28 15:39:37 -05:00
|
|
|
"go.uber.org/zap"
|
|
|
|
"go.uber.org/zap/zapcore"
|
2019-05-20 11:59:20 -05:00
|
|
|
)
|
|
|
|
|
2019-12-10 15:36:46 -05:00
|
|
|
// Server describes an HTTP server.
type Server struct {
	// Socket addresses to which to bind listeners. Accepts
	// [network addresses](/docs/conventions#network-addresses)
	// that may include port ranges. Listener addresses must
	// be unique; they cannot be repeated across all defined
	// servers.
	Listen []string `json:"listen,omitempty"`

	// A list of listener wrapper modules, which can modify the behavior
	// of the base listener. They are applied in the given order.
	ListenerWrappersRaw []json.RawMessage `json:"listener_wrappers,omitempty" caddy:"namespace=caddy.listeners inline_key=wrapper"`

	// How long to allow a read from a client's upload. Setting this
	// to a short, non-zero value can mitigate slowloris attacks, but
	// may also affect legitimately slow clients.
	ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`

	// ReadHeaderTimeout is like ReadTimeout but for request headers.
	ReadHeaderTimeout caddy.Duration `json:"read_header_timeout,omitempty"`

	// WriteTimeout is how long to allow a write to a client. Note
	// that setting this to a small value when serving large files
	// may negatively affect legitimately slow clients.
	WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`

	// IdleTimeout is the maximum time to wait for the next request
	// when keep-alives are enabled. If zero, a default timeout of
	// 5m is applied to help avoid resource exhaustion.
	IdleTimeout caddy.Duration `json:"idle_timeout,omitempty"`

	// MaxHeaderBytes is the maximum size to parse from a client's
	// HTTP request headers.
	MaxHeaderBytes int `json:"max_header_bytes,omitempty"`

	// Routes describes how this server will handle requests.
	// Routes are executed sequentially. First a route's matchers
	// are evaluated, then its grouping. If it matches and has
	// not been mutually-excluded by its grouping, then its
	// handlers are executed sequentially. The sequence of invoked
	// handlers comprises a compiled middleware chain that flows
	// from each matching route and its handlers to the next.
	//
	// By default, all unrouted requests receive a 200 OK response
	// to indicate the server is working.
	Routes RouteList `json:"routes,omitempty"`

	// Errors is how this server will handle errors returned from any
	// of the handlers in the primary routes. If the primary handler
	// chain returns an error, the error along with its recommended
	// status code are bubbled back up to the HTTP server which
	// executes a separate error route, specified using this property.
	// The error routes work exactly like the normal routes.
	Errors *HTTPErrorConfig `json:"errors,omitempty"`

	// How to handle TLS connections. At least one policy is
	// required to enable HTTPS on this server if automatic
	// HTTPS is disabled or does not apply.
	TLSConnPolicies caddytls.ConnectionPolicies `json:"tls_connection_policies,omitempty"`

	// AutoHTTPS configures or disables automatic HTTPS within this server.
	// HTTPS is enabled automatically and by default when qualifying names
	// are present in a Host matcher and/or when the server is listening
	// only on the HTTPS port.
	AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"`

	// If true, will require that a request's Host header match
	// the value of the ServerName sent by the client's TLS
	// ClientHello; often a necessary safeguard when using TLS
	// client authentication.
	StrictSNIHost *bool `json:"strict_sni_host,omitempty"`

	// Enables access logging and configures how access logs are handled
	// in this server. To minimally enable access logs, simply set this
	// to a non-null, empty struct.
	Logs *ServerLogConfig `json:"logs,omitempty"`

	// Enable experimental HTTP/3 support. Note that HTTP/3 is not a
	// finished standard and has extremely limited client support.
	// This field is not subject to compatibility promises.
	ExperimentalHTTP3 bool `json:"experimental_http3,omitempty"`

	// Enables H2C ("Cleartext HTTP/2" or "H2 over TCP") support,
	// which will serve HTTP/2 over plaintext TCP connections if
	// the client supports it. Because this is not implemented by the
	// Go standard library, using H2C is incompatible with most
	// of the other options for this server. Do not enable this
	// only to achieve maximum client compatibility. In practice,
	// very few clients implement H2C, and even fewer require it.
	// This setting applies only to unencrypted HTTP listeners.
	// ⚠️ Experimental feature; subject to change or removal.
	AllowH2C bool `json:"allow_h2c,omitempty"`

	// name identifies this server; unexported and not serialized.
	// NOTE(review): presumably set during provisioning from the app's
	// servers map key — confirm against the provisioning code.
	name string

	// Compiled middleware chains: primaryHandlerChain serves the main
	// Routes; errorHandlerChain serves the Errors routes (if configured).
	primaryHandlerChain Handler
	errorHandlerChain   Handler

	// listenerWrappers holds the provisioned modules from
	// ListenerWrappersRaw.
	listenerWrappers []caddy.ListenerWrapper

	// listeners are the network listeners this server is bound to.
	listeners []net.Listener

	// References set up at provision time: the TLS app (used by
	// ServeHTTP for ACME HTTP challenges) and the server's loggers.
	tlsApp       *caddytls.TLS
	logger       *zap.Logger
	accessLogger *zap.Logger
	errorLogger  *zap.Logger

	// h3server, when non-nil, is used by ServeHTTP to advertise
	// HTTP/3 via Alt-Svc response headers.
	h3server *http3.Server
}
|
|
|
|
|
|
|
|
// ServeHTTP is the entry point for all HTTP requests. It sets the
// Server header, advertises HTTP/3 when enabled, rejects abnormally
// long request methods, prepares the request (replacer and context
// values), optionally wraps the response writer for access logging,
// lets the TLS app intercept ACME HTTP challenges, and then executes
// the primary handler chain. If that chain returns an error, the
// original request is restored and the error is dispatched to the
// user-defined error route (if any) or answered with the error's
// recommended status code.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Server", "Caddy")

	// advertise HTTP/3 availability to clients via Alt-Svc
	if s.h3server != nil {
		err := s.h3server.SetQuicHeaders(w.Header())
		if err != nil {
			s.logger.Error("setting HTTP/3 Alt-Svc header", zap.Error(err))
		}
	}

	// reject very long methods; probably a mistake or an attack
	if len(r.Method) > 32 {
		if s.shouldLogRequest(r) {
			s.accessLogger.Debug("rejecting request with long method",
				zap.String("method_trunc", r.Method[:32]),
				zap.String("remote_addr", r.RemoteAddr))
		}
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	// set up the replacer and attach the standard context values
	// (PrepareRequest returns the request to use from here on)
	repl := caddy.NewReplacer()
	r = PrepareRequest(r, repl, w, s)

	// encode the request for logging purposes before
	// it enters any handler chain; this is necessary
	// to capture the original request in case it gets
	// modified during handling
	shouldLogCredentials := s.Logs != nil && s.Logs.ShouldLogCredentials
	loggableReq := zap.Object("request", LoggableHTTPRequest{
		Request:              r,
		ShouldLogCredentials: shouldLogCredentials,
	})
	errLog := s.errorLogger.With(loggableReq)

	// duration is assigned after the handler chain (or challenge
	// handler) finishes; the deferred access-log closure below
	// reads it, so it must be declared before the defer
	var duration time.Duration

	if s.shouldLogRequest(r) {
		// wrap the writer so we can record status and size for the log
		wrec := NewResponseRecorder(w, nil, nil)
		w = wrec

		// capture the original version of the request
		accLog := s.accessLogger.With(loggableReq)

		// emit the access log entry once handling completes
		defer func() {
			repl.Set("http.response.status", wrec.Status())
			repl.Set("http.response.size", wrec.Size())
			repl.Set("http.response.duration", duration)
			repl.Set("http.response.duration_ms", duration.Seconds()*1e3) // multiply seconds to preserve decimal (see #4666)

			logger := accLog
			if s.Logs != nil {
				logger = s.Logs.wrapLogger(logger, r.Host)
			}

			// log at Error level for 4xx/5xx responses, Info otherwise
			log := logger.Info
			if wrec.Status() >= 400 {
				log = logger.Error
			}

			userID, _ := repl.GetString("http.auth.user.id")

			log("handled request",
				zap.String("user_id", userID),
				zap.Duration("duration", duration),
				zap.Int("size", wrec.Size()),
				zap.Int("status", wrec.Status()),
				zap.Object("resp_headers", LoggableHTTPHeader{
					Header:               wrec.Header(),
					ShouldLogCredentials: shouldLogCredentials,
				}),
			)
		}()
	}

	start := time.Now()

	// guarantee ACME HTTP challenges; handle them
	// separately from any user-defined handlers
	if s.tlsApp.HandleHTTPChallenge(w, r) {
		duration = time.Since(start)
		return
	}

	// execute the primary handler chain
	err := s.primaryHandlerChain.ServeHTTP(w, r)
	duration = time.Since(start)

	// if no errors, we're done!
	if err == nil {
		return
	}

	// restore original request before invoking error handler chain (issue #3717)
	// TODO: this does not restore original headers, if modified (for efficiency)
	origReq := r.Context().Value(OriginalRequestCtxKey).(http.Request)
	r.Method = origReq.Method
	r.RemoteAddr = origReq.RemoteAddr
	r.RequestURI = origReq.RequestURI
	cloneURL(origReq.URL, r.URL)

	// prepare the error log
	logger := errLog
	if s.Logs != nil {
		logger = s.Logs.wrapLogger(logger, r.Host)
	}
	logger = logger.With(zap.Duration("duration", duration))

	// get the values that will be used to log the error
	errStatus, errMsg, errFields := errLogValues(err)

	// add HTTP error information to request context
	r = s.Errors.WithError(r, err)

	if s.Errors != nil && len(s.Errors.Routes) > 0 {
		// execute user-defined error handling route
		err2 := s.errorHandlerChain.ServeHTTP(w, r)
		if err2 == nil {
			// user's error route handled the error response
			// successfully, so now just log the error
			logger.Debug(errMsg, errFields...)
		} else {
			// well... this is awkward
			errFields = append([]zapcore.Field{
				zap.String("error", err2.Error()),
				zap.Namespace("first_error"),
				zap.String("msg", errMsg),
			}, errFields...)
			logger.Error("error handling handler error", errFields...)
			// the error route itself failed, so fall back to writing
			// a status derived from the ORIGINAL error (err), not err2
			if handlerErr, ok := err.(HandlerError); ok {
				w.WriteHeader(handlerErr.StatusCode)
			} else {
				w.WriteHeader(http.StatusInternalServerError)
			}
		}
	} else {
		// no error route configured: log (severity by status class)
		// and answer with the recommended status code
		if errStatus >= 500 {
			logger.Error(errMsg, errFields...)
		} else {
			logger.Debug(errMsg, errFields...)
		}
		w.WriteHeader(errStatus)
	}
}
|
|
|
|
|
2019-06-26 17:03:29 -05:00
|
|
|
// wrapPrimaryRoute wraps stack (a compiled middleware handler chain)
|
|
|
|
// in s.enforcementHandler which performs crucial security checks, etc.
|
|
|
|
func (s *Server) wrapPrimaryRoute(stack Handler) Handler {
|
|
|
|
return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
|
|
|
|
return s.enforcementHandler(w, r, stack)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// enforcementHandler is an implicit middleware which performs
|
|
|
|
// standard checks before executing the HTTP middleware chain.
|
|
|
|
func (s *Server) enforcementHandler(w http.ResponseWriter, r *http.Request, next Handler) error {
|
|
|
|
// enforce strict host matching, which ensures that the SNI
|
|
|
|
// value (if any), matches the Host header; essential for
|
|
|
|
// servers that rely on TLS ClientAuth sharing a listener
|
|
|
|
// with servers that do not; if not enforced, client could
|
|
|
|
// bypass by sending benign SNI then restricted Host header
|
2019-09-18 00:13:21 -05:00
|
|
|
if s.StrictSNIHost != nil && *s.StrictSNIHost && r.TLS != nil {
|
2019-06-26 17:03:29 -05:00
|
|
|
hostname, _, err := net.SplitHostPort(r.Host)
|
|
|
|
if err != nil {
|
|
|
|
hostname = r.Host // OK; probably lacked port
|
|
|
|
}
|
2019-10-15 16:37:46 -05:00
|
|
|
if !strings.EqualFold(r.TLS.ServerName, hostname) {
|
2019-06-26 17:03:29 -05:00
|
|
|
err := fmt.Errorf("strict host matching: TLS ServerName (%s) and HTTP Host (%s) values differ",
|
|
|
|
r.TLS.ServerName, hostname)
|
|
|
|
r.Close = true
|
2022-01-12 16:24:22 -05:00
|
|
|
return Error(http.StatusMisdirectedRequest, err)
|
2019-06-26 17:03:29 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return next.ServeHTTP(w, r)
|
|
|
|
}
|
|
|
|
|
2019-07-09 13:58:39 -05:00
|
|
|
// listenersUseAnyPortOtherThan returns true if there are any
|
|
|
|
// listeners in s that use a port which is not otherPort.
|
2019-05-22 15:14:26 -05:00
|
|
|
func (s *Server) listenersUseAnyPortOtherThan(otherPort int) bool {
|
|
|
|
for _, lnAddr := range s.Listen {
|
2019-11-11 17:33:38 -05:00
|
|
|
laddrs, err := caddy.ParseNetworkAddress(lnAddr)
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if uint(otherPort) > laddrs.EndPort || uint(otherPort) < laddrs.StartPort {
|
|
|
|
return true
|
2019-05-22 15:14:26 -05:00
|
|
|
}
|
|
|
|
}
|
2019-09-09 09:25:48 -05:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2019-11-11 17:33:38 -05:00
|
|
|
// hasListenerAddress returns true if s has a listener
|
|
|
|
// at the given address fullAddr. Currently, fullAddr
|
|
|
|
// must represent exactly one socket address (port
|
|
|
|
// ranges are not supported)
|
2019-09-18 19:01:32 -05:00
|
|
|
func (s *Server) hasListenerAddress(fullAddr string) bool {
|
2019-11-11 17:33:38 -05:00
|
|
|
laddrs, err := caddy.ParseNetworkAddress(fullAddr)
|
2019-09-18 19:01:32 -05:00
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
2019-11-11 17:33:38 -05:00
|
|
|
if laddrs.PortRangeSize() != 1 {
|
|
|
|
return false // TODO: support port ranges
|
2019-09-18 19:01:32 -05:00
|
|
|
}
|
2019-11-11 17:33:38 -05:00
|
|
|
|
2019-09-09 09:25:48 -05:00
|
|
|
for _, lnAddr := range s.Listen {
|
2019-11-11 17:33:38 -05:00
|
|
|
thisAddrs, err := caddy.ParseNetworkAddress(lnAddr)
|
2019-09-18 19:01:32 -05:00
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
2019-11-11 17:33:38 -05:00
|
|
|
if thisAddrs.Network != laddrs.Network {
|
2019-09-18 19:01:32 -05:00
|
|
|
continue
|
|
|
|
}
|
2019-11-11 17:33:38 -05:00
|
|
|
|
2020-04-26 23:28:49 -05:00
|
|
|
// Apparently, Linux requires all bound ports to be distinct
|
|
|
|
// *regardless of host interface* even if the addresses are
|
|
|
|
// in fact different; binding "192.168.0.1:9000" and then
|
|
|
|
// ":9000" will fail for ":9000" because "address is already
|
|
|
|
// in use" even though it's not, and the same bindings work
|
|
|
|
// fine on macOS. I also found on Linux that listening on
|
|
|
|
// "[::]:9000" would fail with a similar error, except with
|
|
|
|
// the address "0.0.0.0:9000", as if deliberately ignoring
|
|
|
|
// that I specified the IPv6 interface explicitly. This seems
|
|
|
|
// to be a major bug in the Linux network stack and I don't
|
|
|
|
// know why it hasn't been fixed yet, so for now we have to
|
|
|
|
// special-case ourselves around Linux like a doting parent.
|
|
|
|
// The second issue seems very similar to a discussion here:
|
|
|
|
// https://github.com/nodejs/node/issues/9390
|
|
|
|
//
|
|
|
|
// This is very easy to reproduce by creating an HTTP server
|
|
|
|
// that listens to both addresses or just one with a host
|
|
|
|
// interface; or for a more confusing reproduction, try
|
|
|
|
// listening on "127.0.0.1:80" and ":443" and you'll see
|
|
|
|
// the error, if you take away the GOOS condition below.
|
|
|
|
//
|
|
|
|
// So, an address is equivalent if the port is in the port
|
|
|
|
// range, and if not on Linux, the host is the same... sigh.
|
|
|
|
if (runtime.GOOS == "linux" || thisAddrs.Host == laddrs.Host) &&
|
2019-11-11 17:33:38 -05:00
|
|
|
(laddrs.StartPort <= thisAddrs.EndPort) &&
|
|
|
|
(laddrs.StartPort >= thisAddrs.StartPort) {
|
|
|
|
return true
|
2019-09-09 09:25:48 -05:00
|
|
|
}
|
|
|
|
}
|
2019-05-22 15:14:26 -05:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2019-09-03 10:35:36 -05:00
|
|
|
func (s *Server) hasTLSClientAuth() bool {
|
|
|
|
for _, cp := range s.TLSConnPolicies {
|
2019-09-07 15:25:04 -05:00
|
|
|
if cp.ClientAuthentication != nil && cp.ClientAuthentication.Active() {
|
2019-09-03 10:35:36 -05:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2021-04-19 20:54:12 -05:00
|
|
|
// findLastRouteWithHostMatcher returns the index of the last route
|
|
|
|
// in the server which has a host matcher. Used during Automatic HTTPS
|
|
|
|
// to determine where to insert the HTTP->HTTPS redirect route, such
|
|
|
|
// that it is after any other host matcher but before any "catch-all"
|
|
|
|
// route without a host matcher.
|
|
|
|
func (s *Server) findLastRouteWithHostMatcher() int {
|
2021-07-14 11:49:34 -05:00
|
|
|
foundHostMatcher := false
|
2021-04-19 20:54:12 -05:00
|
|
|
lastIndex := len(s.Routes)
|
2021-07-14 11:49:34 -05:00
|
|
|
|
2021-04-19 20:54:12 -05:00
|
|
|
for i, route := range s.Routes {
|
|
|
|
// since we want to break out of an inner loop, use a closure
|
|
|
|
// to allow us to use 'return' when we found a host matcher
|
|
|
|
found := (func() bool {
|
|
|
|
for _, sets := range route.MatcherSets {
|
|
|
|
for _, matcher := range sets {
|
|
|
|
switch matcher.(type) {
|
|
|
|
case *MatchHost:
|
2021-07-14 11:49:34 -05:00
|
|
|
foundHostMatcher = true
|
2021-04-19 20:54:12 -05:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
})()
|
|
|
|
|
|
|
|
// if we found the host matcher, change the lastIndex to
|
|
|
|
// just after the current route
|
|
|
|
if found {
|
|
|
|
lastIndex = i + 1
|
|
|
|
}
|
|
|
|
}
|
2021-07-14 11:49:34 -05:00
|
|
|
|
|
|
|
// If we didn't actually find a host matcher, return 0
|
|
|
|
// because that means every defined route was a "catch-all".
|
|
|
|
// See https://caddy.community/t/how-to-set-priority-in-caddyfile/13002/8
|
|
|
|
if !foundHostMatcher {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2021-04-19 20:54:12 -05:00
|
|
|
return lastIndex
|
|
|
|
}
|
|
|
|
|
2019-06-26 11:49:32 -05:00
|
|
|
// HTTPErrorConfig determines how to handle errors
// from the HTTP handlers.
type HTTPErrorConfig struct {
	// The routes to evaluate after the primary handler
	// chain returns an error. In an error route, extra
	// placeholders are available (most are populated by
	// WithError when the error route is invoked):
	//
	// Placeholder | Description
	// ------------|---------------
	// `{http.error.status_code}` | The recommended HTTP status code
	// `{http.error.status_text}` | The status text associated with the recommended status code
	// `{http.error.message}` | The error message
	// `{http.error.trace}` | The origin of the error
	// `{http.error.id}` | An identifier for this occurrence of the error
	Routes RouteList `json:"routes,omitempty"`
}
|
|
|
|
|
2019-10-28 15:39:37 -05:00
|
|
|
// WithError makes a shallow copy of r to add the error to its
|
|
|
|
// context, and sets placeholders on the request's replacer
|
|
|
|
// related to err. It returns the modified request which has
|
|
|
|
// the error information in its context and replacer. It
|
|
|
|
// overwrites any existing error values that are stored.
|
|
|
|
func (*HTTPErrorConfig) WithError(r *http.Request, err error) *http.Request {
|
|
|
|
// add the raw error value to the request context
|
|
|
|
// so it can be accessed by error handlers
|
|
|
|
c := context.WithValue(r.Context(), ErrorCtxKey, err)
|
|
|
|
r = r.WithContext(c)
|
|
|
|
|
|
|
|
// add error values to the replacer
|
2019-12-29 15:12:52 -05:00
|
|
|
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
2020-03-30 12:49:53 -05:00
|
|
|
repl.Set("http.error", err)
|
2019-10-28 15:39:37 -05:00
|
|
|
if handlerErr, ok := err.(HandlerError); ok {
|
2020-03-30 12:49:53 -05:00
|
|
|
repl.Set("http.error.status_code", handlerErr.StatusCode)
|
2019-10-28 15:39:37 -05:00
|
|
|
repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode))
|
|
|
|
repl.Set("http.error.trace", handlerErr.Trace)
|
|
|
|
repl.Set("http.error.id", handlerErr.ID)
|
|
|
|
}
|
|
|
|
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
2020-04-28 09:32:04 -05:00
|
|
|
// shouldLogRequest returns true if this request should be logged.
|
|
|
|
func (s *Server) shouldLogRequest(r *http.Request) bool {
|
|
|
|
if s.accessLogger == nil || s.Logs == nil {
|
|
|
|
// logging is disabled
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
for _, dh := range s.Logs.SkipHosts {
|
|
|
|
// logging for this particular host is disabled
|
2022-03-04 15:44:59 -05:00
|
|
|
if certmagic.MatchWildcard(r.Host, dh) {
|
2020-04-28 09:32:04 -05:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if _, ok := s.Logs.LoggerNames[r.Host]; ok {
|
|
|
|
// this host is mapped to a particular logger name
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if s.Logs.SkipUnmappedHosts {
|
|
|
|
// this host is not mapped and thus must not be logged
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// ServerLogConfig describes a server's logging configuration. If
// enabled without customization, all requests to this server are
// logged to the default logger; logger destinations may be
// customized per-request-host.
type ServerLogConfig struct {
	// The default logger name for all logs emitted by this server for
	// hostnames that are not in the LoggerNames (logger_names) map.
	DefaultLoggerName string `json:"default_logger_name,omitempty"`

	// LoggerNames maps request hostnames to a custom logger name.
	// For example, a mapping of "example.com" to "example" would
	// cause access logs from requests with a Host of example.com
	// to be emitted by a logger named "http.log.access.example".
	LoggerNames map[string]string `json:"logger_names,omitempty"`

	// By default, all requests to this server will be logged if
	// access logging is enabled. This field lists the request
	// hosts for which access logging should be disabled.
	SkipHosts []string `json:"skip_hosts,omitempty"`

	// If true, requests to any host not appearing in the
	// LoggerNames (logger_names) map will not be logged.
	SkipUnmappedHosts bool `json:"skip_unmapped_hosts,omitempty"`

	// If true, credentials that are otherwise omitted will be logged.
	// The definition of credentials is given by https://fetch.spec.whatwg.org/#credentials,
	// and this includes some request and response headers, e.g. `Cookie`,
	// `Set-Cookie`, `Authorization`, and `Proxy-Authorization`.
	ShouldLogCredentials bool `json:"should_log_credentials,omitempty"`
}
|
|
|
|
|
2020-04-10 09:12:42 -05:00
|
|
|
// wrapLogger wraps logger in a logger named according to user preferences for the given host.
|
|
|
|
func (slc ServerLogConfig) wrapLogger(logger *zap.Logger, host string) *zap.Logger {
|
|
|
|
if loggerName := slc.getLoggerName(host); loggerName != "" {
|
|
|
|
return logger.Named(loggerName)
|
|
|
|
}
|
|
|
|
return logger
|
|
|
|
}
|
|
|
|
|
2020-04-08 15:39:20 -05:00
|
|
|
func (slc ServerLogConfig) getLoggerName(host string) string {
|
2020-06-26 13:01:50 -05:00
|
|
|
tryHost := func(key string) (string, bool) {
|
|
|
|
// first try exact match
|
|
|
|
if loggerName, ok := slc.LoggerNames[key]; ok {
|
|
|
|
return loggerName, ok
|
|
|
|
}
|
|
|
|
// strip port and try again (i.e. Host header of "example.com:1234" should
|
|
|
|
// match "example.com" if there is no "example.com:1234" in the map)
|
|
|
|
hostOnly, _, err := net.SplitHostPort(key)
|
|
|
|
if err != nil {
|
|
|
|
return "", false
|
|
|
|
}
|
|
|
|
loggerName, ok := slc.LoggerNames[hostOnly]
|
|
|
|
return loggerName, ok
|
|
|
|
}
|
|
|
|
|
|
|
|
// try the exact hostname first
|
|
|
|
if loggerName, ok := tryHost(host); ok {
|
2020-04-08 15:39:20 -05:00
|
|
|
return loggerName
|
|
|
|
}
|
2020-05-11 15:17:59 -05:00
|
|
|
|
2020-06-26 13:01:50 -05:00
|
|
|
// try matching wildcard domains if other non-specific loggers exist
|
2020-05-11 15:17:59 -05:00
|
|
|
labels := strings.Split(host, ".")
|
|
|
|
for i := range labels {
|
|
|
|
if labels[i] == "" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
labels[i] = "*"
|
|
|
|
wildcardHost := strings.Join(labels, ".")
|
2020-06-26 13:01:50 -05:00
|
|
|
if loggerName, ok := tryHost(wildcardHost); ok {
|
2020-05-11 15:17:59 -05:00
|
|
|
return loggerName
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-28 09:32:04 -05:00
|
|
|
return slc.DefaultLoggerName
|
2020-04-08 15:39:20 -05:00
|
|
|
}
|
|
|
|
|
2020-05-11 13:14:47 -05:00
|
|
|
// PrepareRequest fills the request r for use in a Caddy HTTP handler chain. w and s can
|
|
|
|
// be nil, but the handlers will lose response placeholders and access to the server.
|
|
|
|
func PrepareRequest(r *http.Request, repl *caddy.Replacer, w http.ResponseWriter, s *Server) *http.Request {
|
|
|
|
// set up the context for the request
|
|
|
|
ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl)
|
|
|
|
ctx = context.WithValue(ctx, ServerCtxKey, s)
|
2022-08-02 15:39:09 -05:00
|
|
|
ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]any))
|
2020-05-11 13:14:47 -05:00
|
|
|
ctx = context.WithValue(ctx, routeGroupCtxKey, make(map[string]struct{}))
|
|
|
|
var url2 url.URL // avoid letting this escape to the heap
|
|
|
|
ctx = context.WithValue(ctx, OriginalRequestCtxKey, originalRequest(r, &url2))
|
|
|
|
r = r.WithContext(ctx)
|
|
|
|
|
|
|
|
// once the pointer to the request won't change
|
|
|
|
// anymore, finish setting up the replacer
|
|
|
|
addHTTPVarsToReplacer(repl, r, w)
|
|
|
|
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
2019-10-28 15:39:37 -05:00
|
|
|
// errLogValues inspects err and returns the status code
|
|
|
|
// to use, the error log message, and any extra fields.
|
|
|
|
// If err is a HandlerError, the returned values will
|
|
|
|
// have richer information.
|
|
|
|
func errLogValues(err error) (status int, msg string, fields []zapcore.Field) {
|
2022-03-11 14:34:55 -05:00
|
|
|
var handlerErr HandlerError
|
|
|
|
if errors.As(err, &handlerErr) {
|
2019-10-28 15:39:37 -05:00
|
|
|
status = handlerErr.StatusCode
|
2019-10-30 22:41:52 -05:00
|
|
|
if handlerErr.Err == nil {
|
|
|
|
msg = err.Error()
|
|
|
|
} else {
|
|
|
|
msg = handlerErr.Err.Error()
|
|
|
|
}
|
2019-10-28 15:39:37 -05:00
|
|
|
fields = []zapcore.Field{
|
|
|
|
zap.Int("status", handlerErr.StatusCode),
|
|
|
|
zap.String("err_id", handlerErr.ID),
|
|
|
|
zap.String("err_trace", handlerErr.Trace),
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
status = http.StatusInternalServerError
|
|
|
|
msg = err.Error()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// originalRequest returns a partial, shallow copy of
|
|
|
|
// req, including: req.Method, deep copy of req.URL
|
|
|
|
// (into the urlCopy parameter, which should be on the
|
2019-11-05 18:28:33 -05:00
|
|
|
// stack), req.RequestURI, and req.RemoteAddr. Notably,
|
|
|
|
// headers are not copied. This function is designed to
|
2019-12-04 18:28:13 -05:00
|
|
|
// be very fast and efficient, and useful primarily for
|
2019-11-05 18:28:33 -05:00
|
|
|
// read-only/logging purposes.
|
2019-10-28 15:39:37 -05:00
|
|
|
func originalRequest(req *http.Request, urlCopy *url.URL) http.Request {
|
2019-11-05 18:28:33 -05:00
|
|
|
cloneURL(req.URL, urlCopy)
|
2019-10-28 15:39:37 -05:00
|
|
|
return http.Request{
|
|
|
|
Method: req.Method,
|
2019-11-05 18:28:33 -05:00
|
|
|
RemoteAddr: req.RemoteAddr,
|
2019-10-28 15:39:37 -05:00
|
|
|
RequestURI: req.RequestURI,
|
|
|
|
URL: urlCopy,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-05 14:36:42 -05:00
|
|
|
// cloneURL makes a copy of r.URL and returns a
|
|
|
|
// new value that doesn't reference the original.
|
2019-11-05 18:28:33 -05:00
|
|
|
func cloneURL(from, to *url.URL) {
|
|
|
|
*to = *from
|
|
|
|
if from.User != nil {
|
2019-09-05 14:36:42 -05:00
|
|
|
userInfo := new(url.Userinfo)
|
2019-11-05 18:28:33 -05:00
|
|
|
*userInfo = *from.User
|
|
|
|
to.User = userInfo
|
2019-09-05 14:36:42 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-26 11:49:32 -05:00
|
|
|
// Context keys for HTTP request context values.
// These are all attached to the request context by
// PrepareRequest before the handler chain runs.
const (
	// For referencing the server instance
	ServerCtxKey caddy.CtxKey = "server"

	// For the request's variable table
	VarsCtxKey caddy.CtxKey = "vars"

	// For a partial copy of the unmodified request that
	// originally came into the server's entry handler
	OriginalRequestCtxKey caddy.CtxKey = "original_request"
)
|