// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"fmt"
	"log"
	"net"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// Listen returns a listener suitable for use in a Caddy module.
// Always be sure to close listeners when you are done with them.
func Listen(network, addr string) (net.Listener, error) {
	lnKey := network + "/" + addr

	listenersMu.Lock()
	defer listenersMu.Unlock()

	// if listener already exists, increment usage counter, then return listener
	if lnGlobal, ok := listeners[lnKey]; ok {
		atomic.AddInt32(&lnGlobal.usage, 1)
		return &fakeCloseListener{
			usage:      &lnGlobal.usage,
			deadline:   &lnGlobal.deadline,
			deadlineMu: &lnGlobal.deadlineMu,
			key:        lnKey,
			Listener:   lnGlobal.ln,
		}, nil
	}

	// or, create new one and save it
	ln, err := net.Listen(network, addr)
	if err != nil {
		return nil, err
	}

	// make sure to start its usage counter at 1
	lnGlobal := &globalListener{usage: 1, ln: ln}
	listeners[lnKey] = lnGlobal

	return &fakeCloseListener{
		usage:      &lnGlobal.usage,
		deadline:   &lnGlobal.deadline,
		deadlineMu: &lnGlobal.deadlineMu,
		key:        lnKey,
		Listener:   ln,
	}, nil
}
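
// A minimal usage sketch (illustrative only; the function name, handler
// parameter, and address are not part of this package): a module would
// typically call Listen during provisioning and Close the returned
// listener during cleanup. Closing releases only this caller's claim;
// the underlying socket is closed when no one else is using it.
//
//	func serveExample(handle func(net.Conn)) error {
//		ln, err := Listen("tcp", "127.0.0.1:0")
//		if err != nil {
//			return err
//		}
//		defer ln.Close() // decrements the shared usage counter
//		for {
//			conn, err := ln.Accept()
//			if err != nil {
//				return err
//			}
//			go handle(conn)
//		}
//	}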

// ListenPacket returns a net.PacketConn suitable for use in a Caddy module.
// Always be sure to close the PacketConn when you are done.
func ListenPacket(network, addr string) (net.PacketConn, error) {
	lnKey := network + "/" + addr

	listenersMu.Lock()
	defer listenersMu.Unlock()

	// if listener already exists, increment usage counter, then return listener
	if lnGlobal, ok := listeners[lnKey]; ok {
		atomic.AddInt32(&lnGlobal.usage, 1)
		log.Printf("[DEBUG] %s: Usage counter should not go above 2 or maybe 3, is now: %d", lnKey, atomic.LoadInt32(&lnGlobal.usage)) // TODO: remove
		return &fakeClosePacketConn{usage: &lnGlobal.usage, key: lnKey, PacketConn: lnGlobal.pc}, nil
	}

	// or, create new one and save it
	pc, err := net.ListenPacket(network, addr)
	if err != nil {
		return nil, err
	}

	// make sure to start its usage counter at 1
	lnGlobal := &globalListener{usage: 1, pc: pc}
	listeners[lnKey] = lnGlobal

	return &fakeClosePacketConn{usage: &lnGlobal.usage, key: lnKey, PacketConn: pc}, nil
}
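
// A corresponding sketch for packet connections (function name and address
// are illustrative only):
//
//	func readPacketsExample() error {
//		pc, err := ListenPacket("udp", "127.0.0.1:0")
//		if err != nil {
//			return err
//		}
//		defer pc.Close() // decrements the shared usage counter
//		buf := make([]byte, 1024)
//		n, addr, err := pc.ReadFrom(buf)
//		if err != nil {
//			return err
//		}
//		log.Printf("read %d bytes from %s", n, addr)
//		return nil
//	}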

// fakeCloseListener's Close() method is a no-op. This allows
// stopping servers that are using the listener without giving
// up the socket; thus, servers become hot-swappable while the
// listener remains running. Listeners should be re-wrapped in
// a new fakeCloseListener each time the listener is reused.
//
// Other than the 'closed' field (which pertains to this value
// only), the other fields in this struct should be pointers to
// the associated globalListener's struct fields (except 'key'
// which is there for read-only purposes, so it can be a copy).
type fakeCloseListener struct {
	closed       int32       // accessed atomically; belongs to this struct only
	usage        *int32      // accessed atomically; global
	deadline     *bool       // protected by deadlineMu; global
	deadlineMu   *sync.Mutex // global
	key          string      // global, but read-only, so can be a copy
	net.Listener             // global
}

// Accept accepts connections until Close() is called.
func (fcl *fakeCloseListener) Accept() (net.Conn, error) {
	// if the listener is already "closed", return error
	if atomic.LoadInt32(&fcl.closed) == 1 {
		return nil, fcl.fakeClosedErr()
	}

	// wrap underlying accept
	conn, err := fcl.Listener.Accept()
	if err == nil {
		return conn, nil
	}

	// accept returned with error
	// TODO: This may be better as a condition variable so the deadline is cleared only once?
	fcl.deadlineMu.Lock()
	if *fcl.deadline {
		switch ln := fcl.Listener.(type) {
		case *net.TCPListener:
			_ = ln.SetDeadline(time.Time{})
		case *net.UnixListener:
			_ = ln.SetDeadline(time.Time{})
		}
		*fcl.deadline = false
	}
	fcl.deadlineMu.Unlock()

	if atomic.LoadInt32(&fcl.closed) == 1 {
		// if we canceled the Accept() by setting a deadline
		// on the listener, we need to make sure any callers of
		// Accept() think the listener was actually closed;
		// if we return the timeout error instead, callers might
		// simply retry, leaking goroutines for longer
		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			return nil, fcl.fakeClosedErr()
		}
	}

	return nil, err
}
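
// Note on deadline synchronization: when Close() is called, a deadline in
// the past is set on the shared underlying listener while deadlineMu is
// held, and the lock is released only after the deadline is set. Servers
// blocked in Accept() then time out and clear the deadline under the same
// lock, so the clear always happens after the set. Without this ordering,
// the clear could race ahead of the set and leave the listener returning
// i/o timeout errors indefinitely (first evidenced in issue #2658).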

// Close stops accepting new connections without
// closing the underlying listener, unless no one
// else is using it.
func (fcl *fakeCloseListener) Close() error {
	if atomic.CompareAndSwapInt32(&fcl.closed, 0, 1) {
		// unfortunately, there is no way to cancel any
		// currently-blocking calls to Accept() that are
		// awaiting connections since we're not actually
		// closing the listener; so we cheat by setting
		// a deadline in the past, which forces it to
		// time out; note that this only works for
		// certain types of listeners...
		fcl.deadlineMu.Lock()
		if !*fcl.deadline {
			switch ln := fcl.Listener.(type) {
			case *net.TCPListener:
				_ = ln.SetDeadline(time.Now().Add(-1 * time.Minute))
			case *net.UnixListener:
				_ = ln.SetDeadline(time.Now().Add(-1 * time.Minute))
			}
			*fcl.deadline = true
		}
		fcl.deadlineMu.Unlock()

		// since we're no longer using this listener,
		// decrement the usage counter and, if no one
		// else is using it, close underlying listener
		if atomic.AddInt32(fcl.usage, -1) == 0 {
			listenersMu.Lock()
			delete(listeners, fcl.key)
			listenersMu.Unlock()
			err := fcl.Listener.Close()
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func (fcl *fakeCloseListener) fakeClosedErr() error {
	return &net.OpError{
		Op:   "accept",
		Net:  fcl.Listener.Addr().Network(),
		Addr: fcl.Listener.Addr(),
		Err:  errFakeClosed,
	}
}

type fakeClosePacketConn struct {
	closed int32  // accessed atomically
	usage  *int32 // accessed atomically
	key    string
	net.PacketConn
}

func (fcpc *fakeClosePacketConn) Close() error {
	log.Println("[DEBUG] Fake-closing underlying packet conn") // TODO: remove this

	if atomic.CompareAndSwapInt32(&fcpc.closed, 0, 1) {
		// since we're no longer using this listener,
		// decrement the usage counter and, if no one
		// else is using it, close underlying listener
		if atomic.AddInt32(fcpc.usage, -1) == 0 {
			listenersMu.Lock()
			delete(listeners, fcpc.key)
			listenersMu.Unlock()
			err := fcpc.PacketConn.Close()
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// errFakeClosed is the underlying error value returned by
// fakeCloseListener.Accept() after Close() has been called,
// indicating that it is pretending to be closed so that the
// server using it can terminate, while the underlying
// socket is actually left open.
var errFakeClosed = fmt.Errorf("listener 'closed' 😉")

// globalListener keeps global state for a listener
// that may be shared by multiple servers. In other
// words, values in this struct exist only once and
// all other uses of these values point to the ones
// in this struct. In particular, the usage count
// (how many callers are using the listener), the
// actual listener, and synchronization of the
// listener's deadline changes are singular, global
// values that must not be copied.
type globalListener struct {
	usage      int32 // accessed atomically
	deadline   bool
	deadlineMu sync.Mutex
	ln         net.Listener
	pc         net.PacketConn
}

// NetworkAddress contains the individual components
// for a parsed network address of the form accepted
// by ParseNetworkAddress(). Network should be a
// network value accepted by Go's net package. Port
// ranges are given by [StartPort, EndPort].
type NetworkAddress struct {
	Network   string
	Host      string
	StartPort uint
	EndPort   uint
}

// IsUnixNetwork returns true if na.Network is
// unix, unixgram, or unixpacket.
func (na NetworkAddress) IsUnixNetwork() bool {
	return isUnixNetwork(na.Network)
}

// JoinHostPort is like net.JoinHostPort, but where the port
// is StartPort + offset.
func (na NetworkAddress) JoinHostPort(offset uint) string {
	if na.IsUnixNetwork() {
		return na.Host
	}
	return net.JoinHostPort(na.Host, strconv.Itoa(int(na.StartPort+offset)))
}

// PortRangeSize returns how many ports are in
// na's port range. Port ranges are inclusive,
// so the size is the difference of start and
// end ports plus one.
func (na NetworkAddress) PortRangeSize() uint {
	return (na.EndPort - na.StartPort) + 1
}
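
// A small sketch of how a port range expands into individual addresses
// (the address string is illustrative only):
//
//	na, _ := ParseNetworkAddress("tcp/localhost:8080-8082")
//	for i := uint(0); i < na.PortRangeSize(); i++ {
//		_ = na.JoinHostPort(i) // "localhost:8080", "localhost:8081", "localhost:8082"
//	}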

func (na NetworkAddress) isLoopback() bool {
	if na.IsUnixNetwork() {
		return true
	}
	if na.Host == "localhost" {
		return true
	}
	if ip := net.ParseIP(na.Host); ip != nil {
		return ip.IsLoopback()
	}
	return false
}

func (na NetworkAddress) isWildcardInterface() bool {
	if na.Host == "" {
		return true
	}
	if ip := net.ParseIP(na.Host); ip != nil {
		return ip.IsUnspecified()
	}
	return false
}

func (na NetworkAddress) port() string {
	if na.StartPort == na.EndPort {
		return strconv.FormatUint(uint64(na.StartPort), 10)
	}
	return fmt.Sprintf("%d-%d", na.StartPort, na.EndPort)
}

// String reconstructs the address string to the form expected
// by ParseNetworkAddress(). If the address is a unix socket,
// any non-zero port will be dropped.
func (na NetworkAddress) String() string {
	return JoinNetworkAddress(na.Network, na.Host, na.port())
}

func isUnixNetwork(netw string) bool {
	return netw == "unix" || netw == "unixgram" || netw == "unixpacket"
}

// ParseNetworkAddress parses addr into its individual
// components. The input string is expected to be of
// the form "network/host:port-range" where any part is
// optional. The default network, if unspecified, is tcp.
// Port ranges are inclusive.
//
// Network addresses are distinct from URLs and do not
// use URL syntax.
func ParseNetworkAddress(addr string) (NetworkAddress, error) {
	var host, port string
	network, host, port, err := SplitNetworkAddress(addr)
	if network == "" {
		network = "tcp"
	}
	if err != nil {
		return NetworkAddress{}, err
	}
	if isUnixNetwork(network) {
		return NetworkAddress{
			Network: network,
			Host:    host,
		}, nil
	}
	ports := strings.SplitN(port, "-", 2)
	if len(ports) == 1 {
		ports = append(ports, ports[0])
	}
	var start, end uint64
	start, err = strconv.ParseUint(ports[0], 10, 16)
	if err != nil {
		return NetworkAddress{}, fmt.Errorf("invalid start port: %v", err)
	}
	end, err = strconv.ParseUint(ports[1], 10, 16)
	if err != nil {
		return NetworkAddress{}, fmt.Errorf("invalid end port: %v", err)
	}
	if end < start {
		return NetworkAddress{}, fmt.Errorf("end port must not be less than start port")
	}
	if (end - start) > maxPortSpan {
		return NetworkAddress{}, fmt.Errorf("port range exceeds %d ports", maxPortSpan)
	}
	return NetworkAddress{
		Network:   network,
		Host:      host,
		StartPort: uint(start),
		EndPort:   uint(end),
	}, nil
}
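
// Example inputs and the components they produce (the addresses are
// illustrative only):
//
//	ParseNetworkAddress("udp/localhost:9000")  // Network "udp",  Host "localhost", ports 9000-9000
//	ParseNetworkAddress("localhost:8080-8085") // Network "tcp",  Host "localhost", ports 8080-8085
//	ParseNetworkAddress("unix//run/app.sock")  // Network "unix", Host "/run/app.sock"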

// SplitNetworkAddress splits a into its network, host, and port components.
// Note that port may be a port range (:X-Y), or omitted for unix sockets.
func SplitNetworkAddress(a string) (network, host, port string, err error) {
	if idx := strings.Index(a, "/"); idx >= 0 {
		network = strings.ToLower(strings.TrimSpace(a[:idx]))
		a = a[idx+1:]
	}
	if isUnixNetwork(network) {
		host = a
		return
	}
	host, port, err = net.SplitHostPort(a)
	return
}

// JoinNetworkAddress combines network, host, and port into a single
// address string of the form accepted by ParseNetworkAddress(). For
// unix sockets, the network should be "unix" (or "unixgram" or
// "unixpacket") and the path to the socket should be given as the
// host parameter.
func JoinNetworkAddress(network, host, port string) string {
	var a string
	if network != "" {
		a = network + "/"
	}
	if (host != "" && port == "") || isUnixNetwork(network) {
		a += host
	} else if port != "" {
		a += net.JoinHostPort(host, port)
	}
	return a
}
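
// A quick sketch of splitting an address and joining it back (the address
// is illustrative only):
//
//	network, host, port, _ := SplitNetworkAddress("tcp/example.com:443")
//	_ = JoinNetworkAddress(network, host, port) // "tcp/example.com:443"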

// ListenerWrapper is a type that wraps a listener
// so it can modify the input listener's methods.
// Modules that implement this interface are found
// in the caddy.listeners namespace. Usually, to
// wrap a listener, you will define your own struct
// type that embeds the input listener, then
// implement your own methods that you want to wrap,
// calling the underlying listener's methods where
// appropriate.
type ListenerWrapper interface {
	WrapListener(net.Listener) net.Listener
}
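
// A minimal wrapper sketch following the pattern described above; the type
// names are illustrative and not part of this package:
//
//	type loggingWrapper struct{}
//
//	func (loggingWrapper) WrapListener(ln net.Listener) net.Listener {
//		return loggingListener{ln}
//	}
//
//	type loggingListener struct {
//		net.Listener
//	}
//
//	func (ll loggingListener) Accept() (net.Conn, error) {
//		conn, err := ll.Listener.Accept()
//		if err == nil {
//			log.Printf("accepted connection from %s", conn.RemoteAddr())
//		}
//		return conn, err
//	}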

var (
	listeners   = make(map[string]*globalListener)
	listenersMu sync.Mutex
)

const maxPortSpan = 65535