Diffstat (limited to 'modules/caddyhttp/reverseproxy/reverseproxy.go')
 modules/caddyhttp/reverseproxy/reverseproxy.go | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 56 insertions(+), 29 deletions(-)
diff --git a/modules/caddyhttp/reverseproxy/reverseproxy.go b/modules/caddyhttp/reverseproxy/reverseproxy.go
index 123bf774b..f9485c570 100644
--- a/modules/caddyhttp/reverseproxy/reverseproxy.go
+++ b/modules/caddyhttp/reverseproxy/reverseproxy.go
@@ -17,6 +17,8 @@ package reverseproxy
import (
"bytes"
"context"
+ "crypto/rand"
+ "encoding/base64"
"encoding/json"
"errors"
"fmt"
@@ -108,11 +110,6 @@ type Handler struct {
// response is recognized as a streaming response, or if its
// content length is -1; for such responses, writes are flushed
// to the client immediately.
- //
- // Normally, a request will be canceled if the client disconnects
- // before the response is received from the backend. If explicitly
- // set to -1, client disconnection will be ignored and the request
- // will be completed to help facilitate low-latency streaming.
FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
// A list of IP ranges (supports CIDR notation) from which
@@ -399,6 +396,23 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyht
return caddyhttp.Error(http.StatusInternalServerError,
fmt.Errorf("preparing request for upstream round-trip: %v", err))
}
+ // WebSocket over HTTP/2: assuming the backend doesn't support it, the request is rewritten as an HTTP/1.1 Upgrade request
+ // TODO: once we can reliably detect that a backend supports WebSockets over HTTP/2, this rewrite can be skipped for those backends
+ if r.ProtoMajor == 2 && r.Method == http.MethodConnect && r.Header.Get(":protocol") == "websocket" {
+ clonedReq.Header.Del(":protocol")
+ // keep the body for later use; the HTTP/1.1 upgrade request itself uses http.NoBody
+ caddyhttp.SetVar(clonedReq.Context(), "h2_websocket_body", clonedReq.Body)
+ clonedReq.Body = http.NoBody
+ clonedReq.Method = http.MethodGet
+ clonedReq.Header.Set("Upgrade", "websocket")
+ clonedReq.Header.Set("Connection", "Upgrade")
+ key := make([]byte, 16)
+ _, randErr := rand.Read(key)
+ if randErr != nil {
+ return randErr
+ }
+ clonedReq.Header["Sec-WebSocket-Key"] = []string{base64.StdEncoding.EncodeToString(key)}
+ }
// we will need the original headers and Host value if
// header operations are configured; this is so that each
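
For context, here is the downgrade from the hunk above distilled into a standalone sketch. The helper name downgradeH2WebSocket is hypothetical, and the stashing of the original body (which the handler above does via a context var) is elided:

package example

import (
	"crypto/rand"
	"encoding/base64"
	"net/http"
)

// downgradeH2WebSocket rewrites an RFC 8441 extended-CONNECT WebSocket
// request (HTTP/2 CONNECT with :protocol=websocket) into a classic
// HTTP/1.1 GET + Upgrade handshake. Sketch only, not the exact Caddy code.
func downgradeH2WebSocket(req *http.Request) error {
	req.Header.Del(":protocol")
	req.Method = http.MethodGet
	req.Body = http.NoBody // the caller is expected to stash the original body first
	req.Header.Set("Upgrade", "websocket")
	req.Header.Set("Connection", "Upgrade")

	// RFC 6455: Sec-WebSocket-Key is a random 16-byte value, base64-encoded
	key := make([]byte, 16)
	if _, err := rand.Read(key); err != nil {
		return err
	}
	// assign via the map to preserve the header's exact spelling;
	// http.Header.Set would canonicalize it to "Sec-Websocket-Key"
	req.Header["Sec-WebSocket-Key"] = []string{base64.StdEncoding.EncodeToString(key)}
	return nil
}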
@@ -496,7 +510,7 @@ func (h *Handler) proxyLoopIteration(r *http.Request, origReq *http.Request, w h
if proxyErr == nil {
proxyErr = caddyhttp.Error(http.StatusServiceUnavailable, errNoUpstream)
}
- if !h.LoadBalancing.tryAgain(h.ctx, start, retries, proxyErr, r) {
+ if !h.LoadBalancing.tryAgain(h.ctx, start, retries, proxyErr, r, h.logger) {
return true, proxyErr
}
return false, proxyErr
@@ -554,7 +568,7 @@ func (h *Handler) proxyLoopIteration(r *http.Request, origReq *http.Request, w h
// ding the health status of the upstream (an error can still
// occur after the roundtrip if, for example, a response handler
// after the roundtrip returns an error)
- if succ, ok := proxyErr.(roundtripSucceeded); ok {
+ if succ, ok := proxyErr.(roundtripSucceededError); ok {
return true, succ.error
}
@@ -562,7 +576,7 @@ func (h *Handler) proxyLoopIteration(r *http.Request, origReq *http.Request, w h
h.countFailure(upstream)
// if we've tried long enough, break
- if !h.LoadBalancing.tryAgain(h.ctx, start, retries, proxyErr, r) {
+ if !h.LoadBalancing.tryAgain(h.ctx, start, retries, proxyErr, r, h.logger) {
return true, proxyErr
}
@@ -625,7 +639,8 @@ func (h Handler) prepareRequest(req *http.Request, repl *caddy.Replacer) (*http.
if h.RequestBuffers != 0 && req.Body != nil {
var readBytes int64
req.Body, readBytes = h.bufferedBody(req.Body, h.RequestBuffers)
- if h.RequestBuffers == -1 {
+ // set Content-Length when body is fully buffered
+ if b, ok := req.Body.(bodyReadCloser); ok && b.body == nil {
req.ContentLength = readBytes
req.Header.Set("Content-Length", strconv.FormatInt(req.ContentLength, 10))
}
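
For context: RequestBuffers == -1 used to be the only case treated as "fully buffered", but with a positive RequestBuffers limit the body is also read to EOF whenever it is smaller than the limit, and the new type assertion catches that case too. A rough sketch of the shape bodyReadCloser needs for `b.body == nil` to mean "fully buffered" (field roles assumed; the real definition is at the bottom of this file):

package example

import (
	"bytes"
	"io"
)

// Assumed shape of bodyReadCloser: it reads the buffered bytes first,
// then the rest of the original body. The invariant the hunk above
// relies on: body is set to nil once the original request body was
// consumed entirely while buffering, so the full length is known.
type bodyReadCloser struct {
	io.Reader               // reads buf's contents, then body's
	buf  *bytes.Buffer      // pooled buffer holding the buffered prefix
	body io.ReadCloser      // unbuffered remainder; nil if fully buffered
}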
@@ -807,37 +822,44 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, origRe
shouldLogCredentials := server.Logs != nil && server.Logs.ShouldLogCredentials
// Forward 1xx status codes, backported from https://github.com/golang/go/pull/53164
+ var (
+ roundTripMutex sync.Mutex
+ roundTripDone bool
+ )
trace := &httptrace.ClientTrace{
Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
+ roundTripMutex.Lock()
+ defer roundTripMutex.Unlock()
+ if roundTripDone {
+ // If RoundTrip has returned, don't try to further modify
+ // the ResponseWriter's header map.
+ return nil
+ }
h := rw.Header()
copyHeader(h, http.Header(header))
rw.WriteHeader(code)
// Clear headers coming from the backend
// (it's not automatically done by ResponseWriter.WriteHeader() for 1xx responses)
- for k := range header {
- delete(h, k)
- }
+ clear(h)
return nil
},
}
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
- // if FlushInterval is explicitly configured to -1 (i.e. flush continuously to achieve
- // low-latency streaming), don't let the transport cancel the request if the client
- // disconnects: user probably wants us to finish sending the data to the upstream
- // regardless, and we should expect client disconnection in low-latency streaming
- // scenarios (see issue #4922)
- if h.FlushInterval == -1 {
- req = req.WithContext(context.WithoutCancel(req.Context()))
- }
-
- // do the round-trip; emit debug log with values we know are
- // safe, or if there is no error, emit fuller log entry
+ // do the round-trip
start := time.Now()
res, err := h.Transport.RoundTrip(req)
duration := time.Since(start)
+
+ // record that the round trip is done for the 1xx response handler
+ roundTripMutex.Lock()
+ roundTripDone = true
+ roundTripMutex.Unlock()
+
+ // emit debug log with values we know are safe,
+ // or if there is no error, emit fuller log entry
logger := h.logger.With(
zap.String("upstream", di.Upstream.String()),
zap.Duration("duration", duration),
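
The new mutex closes a race: Got1xxResponse runs on the transport's goroutine and can fire while, or after, RoundTrip returns; once the handler moves on to writing the final response, the 1xx callback must not touch the ResponseWriter's header map anymore. Distilled into a standalone sketch (illustrative names; the real code copies headers via copyHeader):

package example

import (
	"net/http"
	"net/http/httptrace"
	"net/textproto"
	"sync"
)

// forward1xx performs a roundtrip that relays informational responses to
// rw, guarding rw against writes after RoundTrip has returned.
func forward1xx(rw http.ResponseWriter, req *http.Request, rt http.RoundTripper) (*http.Response, error) {
	var (
		mu   sync.Mutex
		done bool
	)
	trace := &httptrace.ClientTrace{
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			mu.Lock()
			defer mu.Unlock()
			if done {
				// RoundTrip already returned; the final response owns rw now
				return nil
			}
			for k, vv := range header {
				rw.Header()[k] = vv
			}
			rw.WriteHeader(code)
			clear(rw.Header()) // WriteHeader does not reset headers for 1xx codes
			return nil
		},
	}
	res, err := rt.RoundTrip(req.WithContext(httptrace.WithClientTrace(req.Context(), trace)))
	mu.Lock()
	done = true
	mu.Unlock()
	return res, err
}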
@@ -951,10 +973,10 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, origRe
res.Body.Close()
}
- // wrap any route error in roundtripSucceeded so caller knows that
+ // wrap any route error in roundtripSucceededError so caller knows that
// the roundtrip was successful and to not retry
if routeErr != nil {
- return roundtripSucceeded{routeErr}
+ return roundtripSucceededError{routeErr}
}
// we're done handling the response, and we don't want to
@@ -1073,7 +1095,7 @@ func (h *Handler) finalizeResponse(
// If true is returned, it has already blocked long enough before
// the next retry (i.e. no more sleeping is needed). If false is
// returned, the handler should stop trying to proxy the request.
-func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, retries int, proxyErr error, req *http.Request) bool {
+func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, retries int, proxyErr error, req *http.Request, logger *zap.Logger) bool {
// no retries are configured
if lb.TryDuration == 0 && lb.Retries == 0 {
return false
@@ -1108,7 +1130,12 @@ func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, retries int
return false
}
- if !lb.RetryMatch.AnyMatch(req) {
+ match, err := lb.RetryMatch.AnyMatchWithError(req)
+ if err != nil {
+ logger.Error("error matching request for retry", zap.Error(err))
+ return false
+ }
+ if !match {
return false
}
}
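
The switch from AnyMatch to AnyMatchWithError surfaces matcher errors instead of silently treating them as "no match". A plausible shape for such a check (interface and names illustrative, not the verbatim caddyhttp definitions):

package example

import "net/http"

// RequestMatcherWithError is a stand-in for caddyhttp's error-aware
// matcher interface.
type RequestMatcherWithError interface {
	MatchWithError(*http.Request) (bool, error)
}

// anyMatchWithError reports whether any matcher matches the request,
// propagating the first matcher error to the caller.
func anyMatchWithError(matchers []RequestMatcherWithError, r *http.Request) (bool, error) {
	for _, m := range matchers {
		matched, err := m.MatchWithError(r)
		if err != nil {
			return false, err
		}
		if matched {
			return true, nil
		}
	}
	return false, nil
}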
@@ -1426,9 +1453,9 @@ type TLSTransport interface {
EnableTLS(base *TLSConfig) error
}
-// roundtripSucceeded is an error type that is returned if the
+// roundtripSucceededError is an error type that is returned if the
// roundtrip succeeded, but an error occurred after-the-fact.
-type roundtripSucceeded struct{ error }
+type roundtripSucceededError struct{ error }
// bodyReadCloser is a reader that, upon closing, will return
// its buffer to the pool and close the underlying body reader.
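
The rename to roundtripSucceededError follows Go's convention that error types end in "Error". Because the type embeds error, it satisfies the error interface while also serving as a sentinel the proxy loop detects by type assertion to skip retries. A minimal, self-contained illustration of the pattern:

package main

import (
	"errors"
	"fmt"
)

// roundtripSucceededError wraps an error that occurred *after* a
// successful roundtrip, signaling the caller not to retry.
type roundtripSucceededError struct{ error }

func main() {
	var err error = roundtripSucceededError{errors.New("response handler failed")}
	if succ, ok := err.(roundtripSucceededError); ok {
		// unwrap the underlying error; the roundtrip itself succeeded
		fmt.Println("no retry; underlying error:", succ.error)
	}
}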