aboutsummaryrefslogtreecommitdiffhomepage
path: root/listen.go
diff options
context:
space:
mode:
authorMatt Holt <[email protected]>2022-09-02 16:59:11 -0600
committerGitHub <[email protected]>2022-09-02 16:59:11 -0600
commitd3c3fa10bd72029720969cff0c29e2b79a1b2cdf (patch)
tree3c4789238dd88a45f5272940c31a9d865613bd14 /listen.go
parent83b26975bd9330c4cfc44d52b106da739240e72b (diff)
downloadcaddy-d3c3fa10bd72029720969cff0c29e2b79a1b2cdf.tar.gz
caddy-d3c3fa10bd72029720969cff0c29e2b79a1b2cdf.zip
core: Refactor listeners; use SO_REUSEPORT on Unix (#4705)
* core: Refactor listeners; use SO_REUSEPORT on Unix Just an experiment for now * Fix lint by logging error * TCP Keepalive configuration (#4865) * initial attempt at TCP Keepalive configuration * core: implement tcp-keepalive for linux * move canSetKeepAlive interface * Godoc for keepalive server parameter * handle return values * log keepalive errors * Clean up after bad merge * Merge in pluggable network types From 1edc1a45e3aee1f7d86b68c3ddaf2fd16ba8ab73 * Slight refactor, fix from recent merge conflict Co-authored-by: Karmanyaah Malhotra <[email protected]>
Diffstat (limited to 'listen.go')
-rw-r--r--listen.go172
1 files changed, 172 insertions, 0 deletions
diff --git a/listen.go b/listen.go
new file mode 100644
index 000000000..2c4a0b288
--- /dev/null
+++ b/listen.go
@@ -0,0 +1,172 @@
+//go:build !linux
+
+package caddy
+
+import (
+ "fmt"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+func ListenTimeout(network, addr string, keepAlivePeriod time.Duration) (net.Listener, error) {
+ // check to see if plugin provides listener
+ if ln, err := getListenerFromPlugin(network, addr); err != nil || ln != nil {
+ return ln, err
+ }
+
+ lnKey := listenerKey(network, addr)
+
+ sharedLn, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
+ ln, err := net.Listen(network, addr)
+ if err != nil {
+ // https://github.com/caddyserver/caddy/pull/4534
+ if isUnixNetwork(network) && isListenBindAddressAlreadyInUseError(err) {
+ return nil, fmt.Errorf("%w: this can happen if Caddy was forcefully killed", err)
+ }
+ return nil, err
+ }
+ return &sharedListener{Listener: ln, key: lnKey}, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &fakeCloseListener{sharedListener: sharedLn.(*sharedListener), keepAlivePeriod: keepAlivePeriod}, nil
+}
+
// fakeCloseListener is a private wrapper over a listener that
// is shared. The state of fakeCloseListener is not shared.
// This allows one user of a socket to "close" the listener
// while in reality the socket stays open for other users of
// the listener. In this way, servers become hot-swappable
// while the listener remains running. Listeners should be
// re-wrapped in a new fakeCloseListener each time the listener
// is reused. This type is atomic and values must not be copied.
type fakeCloseListener struct {
	closed          int32 // accessed atomically; belongs to this struct only
	*sharedListener       // embedded, so we also become a net.Listener
	keepAlivePeriod time.Duration // applied per-connection in Accept: 0 = Go default, >0 = set period, <0 = disable
}
+
// canSetKeepAlive is the subset of connection methods needed to
// configure TCP keepalive on accepted connections (satisfied by
// *net.TCPConn); used by Accept.
type canSetKeepAlive interface {
	SetKeepAlivePeriod(d time.Duration) error
	SetKeepAlive(bool) error
}
+
+func (fcl *fakeCloseListener) Accept() (net.Conn, error) {
+ // if the listener is already "closed", return error
+ if atomic.LoadInt32(&fcl.closed) == 1 {
+ return nil, fakeClosedErr(fcl)
+ }
+
+ // call underlying accept
+ conn, err := fcl.sharedListener.Accept()
+ if err == nil {
+ // if 0, do nothing, Go's default is already set
+ // and if the connection allows setting KeepAlive, set it
+ if tconn, ok := conn.(canSetKeepAlive); ok && fcl.keepAlivePeriod != 0 {
+ if fcl.keepAlivePeriod > 0 {
+ err = tconn.SetKeepAlivePeriod(fcl.keepAlivePeriod)
+ } else { // negative
+ err = tconn.SetKeepAlive(false)
+ }
+ if err != nil {
+ Log().With(zap.String("server", fcl.sharedListener.key)).Warn("unable to set keepalive for new connection:", zap.Error(err))
+ }
+ }
+ return conn, nil
+ }
+
+ // since Accept() returned an error, it may be because our reference to
+ // the listener (this fakeCloseListener) may have been closed, i.e. the
+ // server is shutting down; in that case, we need to clear the deadline
+ // that we set when Close() was called, and return a non-temporary and
+ // non-timeout error value to the caller, masking the "true" error, so
+ // that server loops / goroutines won't retry, linger, and leak
+ if atomic.LoadInt32(&fcl.closed) == 1 {
+ // we dereference the sharedListener explicitly even though it's embedded
+ // so that it's clear in the code that side-effects are shared with other
+ // users of this listener, not just our own reference to it; we also don't
+ // do anything with the error because all we could do is log it, but we
+ // expliclty assign it to nothing so we don't forget it's there if needed
+ _ = fcl.sharedListener.clearDeadline()
+
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ return nil, fakeClosedErr(fcl)
+ }
+ }
+
+ return nil, err
+}
+
+// Close stops accepting new connections without closing the
+// underlying listener. The underlying listener is only closed
+// if the caller is the last known user of the socket.
+func (fcl *fakeCloseListener) Close() error {
+ if atomic.CompareAndSwapInt32(&fcl.closed, 0, 1) {
+ // There are two ways I know of to get an Accept()
+ // function to return to the server loop that called
+ // it: close the listener, or set a deadline in the
+ // past. Obviously, we can't close the socket yet
+ // since others may be using it (hence this whole
+ // file). But we can set the deadline in the past,
+ // and this is kind of cheating, but it works, and
+ // it apparently even works on Windows.
+ _ = fcl.sharedListener.setDeadline()
+ _, _ = listenerPool.Delete(fcl.sharedListener.key)
+ }
+ return nil
+}
+
+// sharedListener is a wrapper over an underlying listener. The listener
+// and the other fields on the struct are shared state that is synchronized,
+// so sharedListener structs must never be copied (always use a pointer).
+type sharedListener struct {
+ net.Listener
+ key string // uniquely identifies this listener
+ deadline bool // whether a deadline is currently set
+ deadlineMu sync.Mutex
+}
+
+func (sl *sharedListener) clearDeadline() error {
+ var err error
+ sl.deadlineMu.Lock()
+ if sl.deadline {
+ switch ln := sl.Listener.(type) {
+ case *net.TCPListener:
+ err = ln.SetDeadline(time.Time{})
+ case *net.UnixListener:
+ err = ln.SetDeadline(time.Time{})
+ }
+ sl.deadline = false
+ }
+ sl.deadlineMu.Unlock()
+ return err
+}
+
+func (sl *sharedListener) setDeadline() error {
+ timeInPast := time.Now().Add(-1 * time.Minute)
+ var err error
+ sl.deadlineMu.Lock()
+ if !sl.deadline {
+ switch ln := sl.Listener.(type) {
+ case *net.TCPListener:
+ err = ln.SetDeadline(timeInPast)
+ case *net.UnixListener:
+ err = ln.SetDeadline(timeInPast)
+ }
+ sl.deadline = true
+ }
+ sl.deadlineMu.Unlock()
+ return err
+}
+
+// Destruct is called by the UsagePool when the listener is
+// finally not being used anymore. It closes the socket.
+func (sl *sharedListener) Destruct() error {
+ return sl.Listener.Close()
+}