Mirror of https://github.com/caddyserver/caddy.git (synced 2024-12-16 21:56:40 -05:00)
reverseproxy: Add more debug logs
This makes debug logging very noisy when reverse proxying, but I guess that's the point. It has proven useful in troubleshooting infrastructure issues.
commit 432f174623
parent c6f34011fb
3 changed files with 28 additions and 6 deletions
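For context, the new messages go through go.uber.org/zap, the structured logging library Caddy uses, and they only reach the output when the configured level is Debug, which is why the extra verbosity is opt-in. A minimal standalone sketch of the zap idioms the diff relies on, with Named() for sub-component loggers, With() for scoped fields, and Debug() for the entries themselves; the logger name and field value here are illustrative, not Caddy's:

package main

import "go.uber.org/zap"

func main() {
	// The development config enables Debug-level output; a production
	// config (Info level) would silently drop the Debug entry below,
	// which is why this commit is only "noisy" when debug logging is on.
	base, _ := zap.NewDevelopment()
	defer base.Sync()

	// Derive a scoped child logger, the same shape the diff uses.
	logger := base.Named("example_proxy").With(zap.Int("read", 512))
	logger.Debug("read from upstream") // emitted with the name and field attached
}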
@@ -977,9 +977,11 @@ func (h Handler) finalizeResponse(
 	}
 
 	rw.WriteHeader(res.StatusCode)
+	logger.Debug("wrote header")
 
-	err := h.copyResponse(rw, res.Body, h.flushInterval(req, res))
-	res.Body.Close() // close now, instead of defer, to populate res.Trailer
+	err := h.copyResponse(rw, res.Body, h.flushInterval(req, res), logger)
+	errClose := res.Body.Close() // close now, instead of defer, to populate res.Trailer
+	logger.Debug("closed response body from upstream", zap.Error(errClose))
 	if err != nil {
 		// we're streaming the response and we've already written headers, so
 		// there's nothing an error handler can do to recover at this point;

@@ -1014,6 +1016,8 @@ func (h Handler) finalizeResponse(
 		}
 	}
 
+	logger.Debug("response finalized")
+
 	return nil
 }
 
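Besides the new messages, the one behavioral nuance in these hunks is that the error returned by closing the upstream body is now captured into errClose and logged rather than thrown away. A rough, self-contained illustration of that shape; copyAndClose and its arguments are invented for the sketch, not Caddy's API:

package main

import (
	"io"
	"strings"

	"go.uber.org/zap"
)

// copyAndClose streams body into w, then closes the body right away (rather
// than deferring) and logs the close error instead of discarding it.
func copyAndClose(w io.Writer, body io.ReadCloser, logger *zap.Logger) error {
	_, copyErr := io.Copy(w, body)
	errClose := body.Close()
	logger.Debug("closed response body from upstream", zap.Error(errClose))
	return copyErr
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	_ = copyAndClose(io.Discard, io.NopCloser(strings.NewReader("hello")), logger)
}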
@@ -166,12 +166,13 @@ func (h Handler) isBidirectionalStream(req *http.Request, res *http.Response) bo
 		(ae == "identity" || ae == "")
 }
 
-func (h Handler) copyResponse(dst io.Writer, src io.Reader, flushInterval time.Duration) error {
+func (h Handler) copyResponse(dst io.Writer, src io.Reader, flushInterval time.Duration, logger *zap.Logger) error {
 	if flushInterval != 0 {
 		if wf, ok := dst.(writeFlusher); ok {
 			mlw := &maxLatencyWriter{
 				dst:     wf,
 				latency: flushInterval,
+				logger:  logger.Named("max_latency_writer"),
 			}
 			defer mlw.stop()
 

@@ -185,19 +186,22 @@ func (h Handler) copyResponse(dst io.Writer, src io.Reader, flushInterval time.D
 
 	buf := streamingBufPool.Get().(*[]byte)
 	defer streamingBufPool.Put(buf)
-	_, err := h.copyBuffer(dst, src, *buf)
+	_, err := h.copyBuffer(dst, src, *buf, logger)
 	return err
 }
 
 // copyBuffer returns any write errors or non-EOF read errors, and the amount
 // of bytes written.
-func (h Handler) copyBuffer(dst io.Writer, src io.Reader, buf []byte) (int64, error) {
+func (h Handler) copyBuffer(dst io.Writer, src io.Reader, buf []byte, logger *zap.Logger) (int64, error) {
 	if len(buf) == 0 {
 		buf = make([]byte, defaultBufferSize)
 	}
 	var written int64
 	for {
+		logger.Debug("waiting to read from upstream")
 		nr, rerr := src.Read(buf)
+		logger := logger.With(zap.Int("read", nr))
+		logger.Debug("read from upstream", zap.Error(rerr))
 		if rerr != nil && rerr != io.EOF && rerr != context.Canceled {
 			// TODO: this could be useful to know (indeed, it revealed an error in our
 			// fastcgi PoC earlier; but it's this single error report here that necessitates

@@ -209,10 +213,15 @@ func (h Handler) copyBuffer(dst io.Writer, src io.Reader, buf []byte) (int64, er
 			h.logger.Error("reading from backend", zap.Error(rerr))
 		}
 		if nr > 0 {
+			logger.Debug("writing to downstream")
 			nw, werr := dst.Write(buf[:nr])
 			if nw > 0 {
 				written += int64(nw)
 			}
+			logger.Debug("wrote to downstream",
+				zap.Int("written", nw),
+				zap.Int64("written_total", written),
+				zap.Error(werr))
 			if werr != nil {
 				return written, werr
 			}
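Taken together, these hunks thread a *zap.Logger through the copy path and log each iteration of the read/write loop. A condensed, standalone sketch of that loop shape; the function name, the fixed buffer, and the simplified error handling are illustrative, not the actual copyBuffer:

package main

import (
	"io"
	"strings"

	"go.uber.org/zap"
)

// loggedCopy copies src to dst in fixed-size chunks, emitting a Debug entry
// before each read and after each write, roughly mirroring the patched loop.
func loggedCopy(dst io.Writer, src io.Reader, logger *zap.Logger) (int64, error) {
	buf := make([]byte, 32*1024)
	var written int64
	for {
		logger.Debug("waiting to read from upstream")
		nr, rerr := src.Read(buf)
		logger := logger.With(zap.Int("read", nr)) // shadowed, per-iteration scope
		logger.Debug("read from upstream", zap.Error(rerr))

		if nr > 0 {
			nw, werr := dst.Write(buf[:nr])
			written += int64(nw)
			logger.Debug("wrote to downstream",
				zap.Int("written", nw),
				zap.Int64("written_total", written),
				zap.Error(werr))
			if werr != nil {
				return written, werr
			}
		}
		if rerr == io.EOF {
			return written, nil
		}
		if rerr != nil {
			return written, rerr
		}
	}
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	_, _ = loggedCopy(io.Discard, strings.NewReader("example payload"), logger)
}

Re-deriving logger with With(zap.Int("read", nr)) inside the loop deliberately shadows the outer variable, so the byte count is attached to every message emitted for that iteration.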
@@ -295,17 +304,21 @@ type maxLatencyWriter struct {
 	mu           sync.Mutex // protects t, flushPending, and dst.Flush
 	t            *time.Timer
 	flushPending bool
+	logger       *zap.Logger
 }
 
 func (m *maxLatencyWriter) Write(p []byte) (n int, err error) {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 	n, err = m.dst.Write(p)
+	m.logger.Debug("wrote bytes", zap.Int("n", n), zap.Error(err))
 	if m.latency < 0 {
+		m.logger.Debug("flushing immediately")
 		m.dst.Flush()
 		return
 	}
 	if m.flushPending {
+		m.logger.Debug("delayed flush already pending")
 		return
 	}
 	if m.t == nil {

@@ -313,6 +326,7 @@ func (m *maxLatencyWriter) Write(p []byte) (n int, err error) {
 	} else {
 		m.t.Reset(m.latency)
 	}
+	m.logger.Debug("timer set for delayed flush", zap.Duration("duration", m.latency))
 	m.flushPending = true
 	return
 }

@@ -321,8 +335,10 @@ func (m *maxLatencyWriter) delayedFlush() {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 	if !m.flushPending { // if stop was called but AfterFunc already started this goroutine
+		m.logger.Debug("delayed flush is not pending")
 		return
 	}
+	m.logger.Debug("delayed flush")
 	m.dst.Flush()
 	m.flushPending = false
 }
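For readers meeting maxLatencyWriter here for the first time: it forwards writes immediately but defers Flush until the configured latency has elapsed (or flushes on every write when the latency is negative), so the new debug lines mark when a flush is scheduled versus when it actually runs. A stripped-down sketch of that delayed-flush idea; the type, its fields, and the 50ms interval are invented for illustration, and the immediate-flush branch and the new logging are omitted:

package main

import (
	"bufio"
	"fmt"
	"os"
	"sync"
	"time"
)

// delayedFlusher mimics the maxLatencyWriter idea: writes go through
// immediately, but Flush is deferred until `latency` has elapsed, so many
// small writes trigger at most one flush per interval.
type delayedFlusher struct {
	dst     *bufio.Writer
	latency time.Duration

	mu           sync.Mutex
	t            *time.Timer
	flushPending bool
}

func (d *delayedFlusher) Write(p []byte) (int, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	n, err := d.dst.Write(p)
	if d.flushPending {
		return n, err // a flush is already scheduled
	}
	if d.t == nil {
		d.t = time.AfterFunc(d.latency, d.delayedFlush)
	} else {
		d.t.Reset(d.latency)
	}
	d.flushPending = true
	return n, err
}

func (d *delayedFlusher) delayedFlush() {
	d.mu.Lock()
	defer d.mu.Unlock()
	if !d.flushPending {
		return
	}
	d.dst.Flush()
	d.flushPending = false
}

func main() {
	w := &delayedFlusher{dst: bufio.NewWriter(os.Stdout), latency: 50 * time.Millisecond}
	fmt.Fprintln(w, "buffered until the delayed flush fires")
	time.Sleep(100 * time.Millisecond) // give the timer a chance to flush before exit
}

The flushPending flag plus the reusable timer is what keeps a burst of small writes from producing more than one flush per interval.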
@@ -4,6 +4,8 @@ import (
 	"bytes"
 	"strings"
 	"testing"
+
+	"github.com/caddyserver/caddy/v2"
 )
 
 func TestHandlerCopyResponse(t *testing.T) {

@@ -18,7 +20,7 @@ func TestHandlerCopyResponse(t *testing.T) {
 	for _, d := range testdata {
 		src := bytes.NewBuffer([]byte(d))
 		dst.Reset()
-		err := h.copyResponse(dst, src, 0)
+		err := h.copyResponse(dst, src, 0, caddy.Log())
 		if err != nil {
 			t.Errorf("failed with error: %v", err)
 		}
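The test satisfies the new parameter with caddy.Log(), Caddy's default logger. A test that wants to stay silent could just as well pass a no-op logger; a tiny sketch of that alternative (not what the commit does):

package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewNop() // discards every entry, including the new Debug lines
	logger.Debug("this goes nowhere")
}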