1
Fork 0
mirror of https://github.com/caddyserver/caddy.git synced 2024-12-16 21:56:40 -05:00

Revert "reverseproxy: Separate ignore_client_gone option"

This reverts commit 8c9e87d0a6.
This commit is contained in:
Matthew Holt 2024-11-12 14:36:25 -07:00
parent 8c9e87d0a6
commit 04a58a9356

View file

@@ -106,15 +106,12 @@ type Handler struct {
// response is recognized as a streaming response, or if its
// content length is -1; for such responses, writes are flushed
// to the client immediately.
FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
//
// Normally, a request will be canceled if the client disconnects
// before the response is received from the backend. If enabled,
// client disconnection will be ignored and the request with the
// backend will carry on until the backend terminates it. This
// can help facilitate low-latency streaming. See #4922 and #4952.
// EXPERIMENTAL: Will likely be removed in the future.
IgnoreClientGone bool `json:"ignore_client_gone,omitempty"`
// before the response is received from the backend. If explicitly
// set to -1, client disconnection will be ignored and the request
// will be completed to help facilitate low-latency streaming.
FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
// A list of IP ranges (supports CIDR notation) from which
// X-Forwarded-* header values should be trusted. By default,
@@ -776,15 +773,12 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, origRe
}
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
// if enabled, don't let the transport cancel the request if the client disconnects:
// user probably wants us to finish sending the data to the upstream regardless,
// and we should expect client disconnection in low-latency streaming scenarios
// (see issue #4922)
// TODO: An ideal solution, if the client disconnects before the backend is done
// receiving data from the proxy, is to wait until the backend is done receiving
// the data and then close the connection with the backend, rather than an explicit
// option to always leave it open...
if h.IgnoreClientGone {
// if FlushInterval is explicitly configured to -1 (i.e. flush continuously to achieve
// low-latency streaming), don't let the transport cancel the request if the client
// disconnects: user probably wants us to finish sending the data to the upstream
// regardless, and we should expect client disconnection in low-latency streaming
// scenarios (see issue #4922)
if h.FlushInterval == -1 {
req = req.WithContext(ignoreClientGoneContext{req.Context()})
}