// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !unix

package caddy
import (
	"context"
	"net"
	"sync"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
)

// reuseUnixSocket is a no-op on platforms that do not support
// unix sockets; it returns nil values so call sites behave the
// same on every platform.
func reuseUnixSocket(network, addr string) (any, error) {
	return nil, nil
}

// listenReusable returns a listener (or packet conn) for the given
// network and address whose underlying socket is pooled by lnKey,
// so that it can be shared across config reloads; "closing" the
// returned value only releases the caller's reference to the socket.
func listenReusable(ctx context.Context, lnKey string, network, address string, config net.ListenConfig) (any, error) {
	switch network {
	case "udp", "udp4", "udp6", "unixgram":
		sharedPc, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
			pc, err := config.ListenPacket(ctx, network, address)
			if err != nil {
				return nil, err
			}
			return &sharedPacketConn{PacketConn: pc, key: lnKey}, nil
		})
		if err != nil {
			return nil, err
		}
		return &fakeClosePacketConn{sharedPacketConn: sharedPc.(*sharedPacketConn)}, nil
	default:
		sharedLn, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
			ln, err := config.Listen(ctx, network, address)
			if err != nil {
				return nil, err
			}
			return &sharedListener{Listener: ln, key: lnKey}, nil
		})
		if err != nil {
			return nil, err
		}
		return &fakeCloseListener{sharedListener: sharedLn.(*sharedListener), keepAlivePeriod: config.KeepAlive}, nil
	}
}
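
// Usage sketch (hypothetical caller and key format; the real key scheme
// lives elsewhere in this package): two configs can hold the same address
// concurrently during a reload, because the socket is pooled by lnKey:
//
//	ln1, _ := listenReusable(ctx, "tcp/:8080", "tcp", ":8080", net.ListenConfig{})
//	ln2, _ := listenReusable(ctx, "tcp/:8080", "tcp", ":8080", net.ListenConfig{})
//	_ = ln1.(net.Listener).Close() // old server "closes"; socket stays open for ln2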

// fakeCloseListener is a private wrapper over a listener that
// is shared. The state of fakeCloseListener is not shared.
// This allows one user of a socket to "close" the listener
// while in reality the socket stays open for other users of
// the listener. In this way, servers become hot-swappable
// while the listener remains running. Listeners should be
// re-wrapped in a new fakeCloseListener each time the listener
// is reused. This type is atomic and values must not be copied.
type fakeCloseListener struct {
	closed          int32 // accessed atomically; belongs to this struct only
	*sharedListener       // embedded, so we also become a net.Listener
	keepAlivePeriod time.Duration
}

// canSetKeepAlive is implemented by connections that can toggle
// TCP keepalive probes and set their period, notably *net.TCPConn.
type canSetKeepAlive interface {
	SetKeepAlivePeriod(d time.Duration) error
	SetKeepAlive(bool) error
}
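
// The keepalive semantics applied in Accept below mirror net.ListenConfig.KeepAlive:
//
//	fcl.keepAlivePeriod == 0 // do nothing; Go's default keepalive behavior stays in place
//	fcl.keepAlivePeriod > 0  // SetKeepAlivePeriod(fcl.keepAlivePeriod)
//	fcl.keepAlivePeriod < 0  // SetKeepAlive(false): disable keepalive probes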

func (fcl *fakeCloseListener) Accept() (net.Conn, error) {
	// if the listener is already "closed", return error
	if atomic.LoadInt32(&fcl.closed) == 1 {
		return nil, fakeClosedErr(fcl)
	}

	// call underlying accept
	conn, err := fcl.sharedListener.Accept()
	if err == nil {
		// if keepAlivePeriod is 0, do nothing; Go's default is already
		// set. Otherwise, if the connection allows setting KeepAlive,
		// apply the configured value.
		if tconn, ok := conn.(canSetKeepAlive); ok && fcl.keepAlivePeriod != 0 {
			if fcl.keepAlivePeriod > 0 {
				err = tconn.SetKeepAlivePeriod(fcl.keepAlivePeriod)
			} else { // negative
				err = tconn.SetKeepAlive(false)
			}
			if err != nil {
				Log().With(zap.String("server", fcl.sharedListener.key)).Warn("unable to set keepalive for new connection", zap.Error(err))
			}
		}
		return conn, nil
	}

	// since Accept() returned an error, it may be because our reference to
	// the listener (this fakeCloseListener) has been closed, i.e. the
	// server is shutting down; in that case, we need to clear the deadline
	// that we set when Close() was called, and return a non-temporary and
	// non-timeout error value to the caller, masking the "true" error, so
	// that server loops / goroutines won't retry, linger, and leak
	if atomic.LoadInt32(&fcl.closed) == 1 {
		// we dereference the sharedListener explicitly even though it's embedded
		// so that it's clear in the code that side-effects are shared with other
		// users of this listener, not just our own reference to it; we also don't
		// do anything with the error because all we could do is log it, but we
		// explicitly assign it to nothing so we don't forget it's there if needed
		_ = fcl.sharedListener.clearDeadline()

		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			return nil, fakeClosedErr(fcl)
		}
	}

	return nil, err
}

// Close stops accepting new connections without closing the
// underlying listener. The underlying listener is only closed
// if the caller is the last known user of the socket.
func (fcl *fakeCloseListener) Close() error {
	if atomic.CompareAndSwapInt32(&fcl.closed, 0, 1) {
		// There are two ways I know of to get an Accept()
		// function to return to the server loop that called
		// it: close the listener, or set a deadline in the
		// past. Obviously, we can't close the socket yet
		// since others may be using it (hence this whole
		// file). But we can set the deadline in the past,
		// and this is kind of cheating, but it works, and
		// it apparently even works on Windows.
		_ = fcl.sharedListener.setDeadline()
		_, _ = listenerPool.Delete(fcl.sharedListener.key)
	}
	return nil
}
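
// Minimal self-contained sketch of the deadline trick (assumed names, not
// part of this file): a deadline in the past forces a blocked Accept to
// return a timeout error without closing the socket:
//
//	ln, _ := net.Listen("tcp", "127.0.0.1:0")
//	go func() {
//		_, err := ln.Accept() // blocks until the deadline fires
//		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
//			// unblocked; the socket itself is still open
//		}
//	}()
//	_ = ln.(*net.TCPListener).SetDeadline(time.Now().Add(-1 * time.Minute))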

// sharedListener is a wrapper over an underlying listener. The listener
// and the other fields on the struct are shared state that is synchronized,
// so sharedListener structs must never be copied (always use a pointer).
type sharedListener struct {
	net.Listener
	key        string // uniquely identifies this listener
	deadline   bool   // whether a deadline is currently set
	deadlineMu sync.Mutex
}

// clearDeadline removes the deadline on the underlying listener,
// if one is currently set, so remaining users of the socket can
// accept connections normally again.
func (sl *sharedListener) clearDeadline() error {
	var err error
	sl.deadlineMu.Lock()
	if sl.deadline {
		switch ln := sl.Listener.(type) {
		case *net.TCPListener:
			err = ln.SetDeadline(time.Time{})
		}
		sl.deadline = false
	}
	sl.deadlineMu.Unlock()
	return err
}

// setDeadline puts a deadline in the past on the underlying
// listener to unblock any pending Accept calls; the deadline
// flag makes the operation idempotent.
func (sl *sharedListener) setDeadline() error {
	timeInPast := time.Now().Add(-1 * time.Minute)
	var err error
	sl.deadlineMu.Lock()
	if !sl.deadline {
		switch ln := sl.Listener.(type) {
		case *net.TCPListener:
			err = ln.SetDeadline(timeInPast)
		}
		sl.deadline = true
	}
	sl.deadlineMu.Unlock()
	return err
}

// Destruct is called by the UsagePool when the listener is
// finally not being used anymore. It closes the socket.
func (sl *sharedListener) Destruct() error {
	return sl.Listener.Close()
}
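
// How the pool drives Destruct (illustrative; listenerPool is a UsagePool
// defined elsewhere in this package):
//
//	val, loaded, err := listenerPool.LoadOrNew(key, newFn) // ref count +1; newFn runs only on first use
//	_, _ = listenerPool.Delete(key)                        // ref count -1; at zero, val.(Destructor).Destruct() closes the socket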

// fakeClosePacketConn is like fakeCloseListener, but for PacketConns,
// or more specifically, *net.UDPConn.
type fakeClosePacketConn struct {
	closed            int32 // accessed atomically; belongs to this struct only
	*sharedPacketConn       // embedded, so we also become a net.PacketConn; its key is used in Close
}

func (fcpc *fakeClosePacketConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) {
	// if the conn is already "closed", return error
	if atomic.LoadInt32(&fcpc.closed) == 1 {
		return 0, nil, &net.OpError{
			Op:   "readfrom",
			Net:  fcpc.LocalAddr().Network(),
			Addr: fcpc.LocalAddr(),
			Err:  errFakeClosed,
		}
	}

	// call underlying ReadFrom
	n, addr, err = fcpc.sharedPacketConn.ReadFrom(p)
	if err != nil {
		// this server was stopped, so clear the deadline and let
		// any new server continue reading; but we will exit
		if atomic.LoadInt32(&fcpc.closed) == 1 {
			if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
				if err = fcpc.SetReadDeadline(time.Time{}); err != nil {
					return
				}
			}
		}
		return
	}

	return
}
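
// Handoff sketch during a reload (assumed sequence of events):
//
//	1. The new config wraps the same sharedPacketConn in a fresh fakeClosePacketConn.
//	2. The old wrapper's Close() sets a read deadline of "now" on the shared conn.
//	3. The old server's blocked ReadFrom returns a timeout; seeing closed==1, it
//	   clears the deadline so the new server's ReadFrom proceeds normally.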

// Close won't close the underlying socket unless there are no more
// references to it; then listenerPool will close it.
func (fcpc *fakeClosePacketConn) Close() error {
	if atomic.CompareAndSwapInt32(&fcpc.closed, 0, 1) {
		_ = fcpc.SetReadDeadline(time.Now()) // unblock ReadFrom() calls to kick old servers out of their loops
		_, _ = listenerPool.Delete(fcpc.sharedPacketConn.key)
	}
	return nil
}

// Unwrap returns the underlying packet conn, so callers that need
// the concrete type (e.g. a *net.UDPConn) can reach it.
func (fcpc *fakeClosePacketConn) Unwrap() net.PacketConn {
	return fcpc.sharedPacketConn.PacketConn
}
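
// Unwrap sketch (hypothetical caller): reaching the concrete *net.UDPConn,
// which some packages need for OOB or batched reads:
//
//	pc, _ := listenReusable(ctx, "udp/:1234", "udp", ":1234", net.ListenConfig{})
//	if u, ok := pc.(interface{ Unwrap() net.PacketConn }); ok {
//		if udpConn, ok := u.Unwrap().(*net.UDPConn); ok {
//			_ = udpConn // use the concrete type here
//		}
//	}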

// sharedPacketConn is like sharedListener, but for net.PacketConns.
type sharedPacketConn struct {
	net.PacketConn
	key string
}

// Destruct closes the underlying socket.
func (spc *sharedPacketConn) Destruct() error {
	return spc.PacketConn.Close()
}

// Unwrap returns the underlying socket.
func (spc *sharedPacketConn) Unwrap() net.PacketConn {
	return spc.PacketConn
}

// Interface guards (see https://github.com/caddyserver/caddy/issues/3998)
var (
	_ (interface {
		Unwrap() net.PacketConn
	}) = (*fakeClosePacketConn)(nil)
)