forgejo/modules/graceful/server.go
idk cdd3d4b8d8
Allow the use of alternative net.Listener implementations by downstreams (#25855)
This is a simple PR which moves the `GetListener` function to a
`DefaultGetListener` function, and changes `GetListener` to be a
variable which by default points to the `DefaultGetListener` function.
This allows people who may exist quasi-downstream of Gitea to create
alternate "GetListener" functions, with identical signatures, which
return different implementations of the `net.Listener` interface. This
approach is expressly intended to be non-invasive and have the least
possible impact on the Gitea codebase. A previous version of this idea
was rejected before (https://github.com/go-gitea/gitea/issues/15544), but
because of issues like https://github.com/go-gitea/gitea/issues/22335 I
**really** think that recommending people configure proxies by hand is
exactly the wrong way to do things (this is why there is a Tor Browser).
This tiny change lets me put proper hidden service configuration into a
single `i2p.go` file which lives in `modules/graceful/` and which never
has to be checked in to your codebase, affect your dependencies, or
bloat your project in any way; it can live on a branch in my fork, I'll
fast-forward it every release, and never the twain shall meet.
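
For reference, the pattern this introduces is tiny; a minimal sketch (not the real implementation, whose body lives elsewhere in `modules/graceful` and is unchanged):

```Go
// DefaultGetListener stands in here for the previous GetListener function.
func DefaultGetListener(network, address string) (net.Listener, error) {
	return net.Listen(network, address) // the real body also handles unix sockets, inherited fds, etc.
}

// GetListener becomes a variable, so a downstream fork can point it at any
// function with the same signature.
var GetListener = DefaultGetListener
```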

The main use-case for this is to listen on Peer-to-Peer networks and
Hidden Services directly without error-prone and cumbersome
port-forwarding configuration. For instance, I might implement an
"I2PGetListener" as follows:

```Go
// adapted from i2p.go, an unchecked-in file in my modules/graceful/ directory
package graceful

import (
	"net"

	"github.com/eyedeekay/onramp"
)

var garlic = &onramp.Garlic{}

func I2PGetListener(network, address string) (net.Listener, error) {
	// Add a deferral to say that we've tried to grab a listener
	defer GetManager().InformCleanup()
	switch network {
	case "tcp", "tcp4", "tcp6", "i2p", "i2pt":
		return garlic.Listen()
	case "unix", "unixpacket":
		// I2P isn't really a replacement for the stuff you use Unix sockets for and it's
		// also not an anonymity risk, so treat them normally
		unixAddr, err := net.ResolveUnixAddr(network, address)
		if err != nil {
			return nil, err
		}
		return GetListenerUnix(network, unixAddr)
	default:
		return nil, net.UnknownNetworkError(network)
	}
}
```

I could then substitute that GetListener function and be 50% of the way
to having a fully-functioning gitea-over-hidden-services instance
without any additional configuration (the other 50% doesn't require any
code changes on Gitea's part).
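
The substitution itself could be as small as an `init` function in that same out-of-tree file; a minimal sketch, assuming the `I2PGetListener` above lives alongside it in `modules/graceful/i2p.go`:

```Go
// Hypothetical wiring in the out-of-tree i2p.go: reassign the package-level
// GetListener variable before any server asks for a listener.
func init() {
	GetListener = I2PGetListener
}
```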

There are two advantages here. The first is convenience: this turns
hidden services into a zero-configuration option for self-hosting
Gitea. The second is safety: these Go libraries pass around
hidden-service-only implementations of net.Addr and hidden-service-only
sockets, both of which are expressly designed to never require access
to any information outside the hidden service network, so manipulating
the application into revealing information about the host becomes much
more difficult, and some attacks become nearly impossible. It also
opens up TLS-over-hidden-services support, which is niche right now, of
course, but in a future where Gitea instances federate, hidden services
that want to be part of the federation will probably need TLS
certificates. They don't need to be painful to set up.

This doesn't fix an open issue, but it might affect:
- https://github.com/go-gitea/gitea/issues/22335 - my `i2p.go` file
actually has a mod that fixes this, but it requires adding a handful of
new dependencies to Gitea and isn't compatible with the normal way you
recommend using a proxy, so I don't think it's ready to send as a PR;
if I can find a non-invasive way to fix it, I will.
- https://github.com/go-gitea/gitea/issues/18240

I hereby agree to the Code of Conduct published here:
8b89563bf1/CODE_OF_CONDUCT.md
I have read and understood the recommendations published here:
8b89563bf1/CONTRIBUTING.md

Thank you for your consideration.

---------

Co-authored-by: eyedeekay <idk@mulder>
Co-authored-by: wxiaoguang <wxiaoguang@gmail.com>
2023-07-24 07:18:17 +00:00


// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

// This code is highly inspired by endless go

package graceful

import (
	"crypto/tls"
	"net"
	"os"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/proxyprotocol"
	"code.gitea.io/gitea/modules/setting"
)

var (
	// DefaultReadTimeOut default read timeout
	DefaultReadTimeOut time.Duration
	// DefaultWriteTimeOut default write timeout
	DefaultWriteTimeOut time.Duration
	// DefaultMaxHeaderBytes default max header bytes
	DefaultMaxHeaderBytes int
	// PerWriteWriteTimeout timeout for writes
	PerWriteWriteTimeout = 30 * time.Second
	// PerWriteWriteTimeoutKbTime is a timeout taking account of how much there is to be written
	PerWriteWriteTimeoutKbTime = 10 * time.Second
)
// GetListener returns a listener from a GetListener function, which must have the
// signature: `func FunctionName(network, address string) (net.Listener, error)`.
// This determines the implementation of net.Listener which the server will use.
// It is implemented in this way so that downstreams may specify the type of listener
// they want to provide Gitea on by default, such as with a hidden service or a p2p network.
// No need to worry about "breaking" if there would be a refactoring for the Listeners. No compatibility guarantee for this mechanism.
var GetListener = DefaultGetListener

func init() {
	DefaultMaxHeaderBytes = 0 // use http.DefaultMaxHeaderBytes - which currently is 1 << 20 (1MB)
}
// ServeFunction represents a listen.Accept loop
type ServeFunction = func(net.Listener) error

// Server represents our graceful server
type Server struct {
	network              string
	address              string
	listener             net.Listener
	wg                   sync.WaitGroup
	state                state
	lock                 *sync.RWMutex
	BeforeBegin          func(network, address string)
	OnShutdown           func()
	PerWriteTimeout      time.Duration
	PerWritePerKbTimeout time.Duration
}
// NewServer creates a server on network at provided address
func NewServer(network, address, name string) *Server {
	if GetManager().IsChild() {
		log.Info("Restarting new %s server: %s:%s on PID: %d", name, network, address, os.Getpid())
	} else {
		log.Info("Starting new %s server: %s:%s on PID: %d", name, network, address, os.Getpid())
	}
	srv := &Server{
		wg:                   sync.WaitGroup{},
		state:                stateInit,
		lock:                 &sync.RWMutex{},
		network:              network,
		address:              address,
		PerWriteTimeout:      setting.PerWriteTimeout,
		PerWritePerKbTimeout: setting.PerWritePerKbTimeout,
	}
	srv.BeforeBegin = func(network, addr string) {
		log.Debug("Starting server on %s:%s (PID: %d)", network, addr, syscall.Getpid())
	}
	return srv
}
// ListenAndServe listens on the provided network address and then calls Serve
// to handle requests on incoming connections.
func (srv *Server) ListenAndServe(serve ServeFunction, useProxyProtocol bool) error {
	go srv.awaitShutdown()
	listener, err := GetListener(srv.network, srv.address)
	if err != nil {
		log.Error("Unable to GetListener: %v", err)
		return err
	}

	// we need to wrap the listener to take account of our lifecycle
	listener = newWrappedListener(listener, srv)

	// Now we need to take account of ProxyProtocol settings...
	if useProxyProtocol {
		listener = &proxyprotocol.Listener{
			Listener:           listener,
			ProxyHeaderTimeout: setting.ProxyProtocolHeaderTimeout,
			AcceptUnknown:      setting.ProxyProtocolAcceptUnknown,
		}
	}

	srv.listener = listener
	srv.BeforeBegin(srv.network, srv.address)
	return srv.Serve(serve)
}
// ListenAndServeTLSConfig listens on the provided network address and then calls
// Serve to handle requests on incoming TLS connections.
func (srv *Server) ListenAndServeTLSConfig(tlsConfig *tls.Config, serve ServeFunction, useProxyProtocol, proxyProtocolTLSBridging bool) error {
	go srv.awaitShutdown()

	if tlsConfig.MinVersion == 0 {
		tlsConfig.MinVersion = tls.VersionTLS12
	}

	listener, err := GetListener(srv.network, srv.address)
	if err != nil {
		log.Error("Unable to get Listener: %v", err)
		return err
	}

	// we need to wrap the listener to take account of our lifecycle
	listener = newWrappedListener(listener, srv)

	// Now we need to take account of ProxyProtocol settings... If we're not bridging then we expect that the proxy will forward the connection to us
	if useProxyProtocol && !proxyProtocolTLSBridging {
		listener = &proxyprotocol.Listener{
			Listener:           listener,
			ProxyHeaderTimeout: setting.ProxyProtocolHeaderTimeout,
			AcceptUnknown:      setting.ProxyProtocolAcceptUnknown,
		}
	}

	// Now handle the tls protocol
	listener = tls.NewListener(listener, tlsConfig)

	// Now if we're bridging then we need the proxy to tell us who we're bridging for...
	if useProxyProtocol && proxyProtocolTLSBridging {
		listener = &proxyprotocol.Listener{
			Listener:           listener,
			ProxyHeaderTimeout: setting.ProxyProtocolHeaderTimeout,
			AcceptUnknown:      setting.ProxyProtocolAcceptUnknown,
		}
	}

	srv.listener = listener
	srv.BeforeBegin(srv.network, srv.address)
	return srv.Serve(serve)
}
// Serve accepts incoming HTTP connections on the wrapped listener l, creating a new
// service goroutine for each. The service goroutines read requests and then call
// handler to reply to them. Handler is typically nil, in which case the
// DefaultServeMux is used.
//
// In addition to the standard Serve behaviour each connection is added to a
// sync.Waitgroup so that all outstanding connections can be served before shutting
// down the server.
func (srv *Server) Serve(serve ServeFunction) error {
	defer log.Debug("Serve() returning... (PID: %d)", syscall.Getpid())
	srv.setState(stateRunning)
	GetManager().RegisterServer()
	err := serve(srv.listener)
	log.Debug("Waiting for connections to finish... (PID: %d)", syscall.Getpid())
	srv.wg.Wait()
	srv.setState(stateTerminate)
	GetManager().ServerDone()
	// use of closed means that the listeners are closed - i.e. we should be shutting down - return nil
	if err == nil || strings.Contains(err.Error(), "use of closed") || strings.Contains(err.Error(), "http: Server closed") {
		return nil
	}
	return err
}
func (srv *Server) getState() state {
	srv.lock.RLock()
	defer srv.lock.RUnlock()

	return srv.state
}

func (srv *Server) setState(st state) {
	srv.lock.Lock()
	defer srv.lock.Unlock()

	srv.state = st
}

type filer interface {
	File() (*os.File, error)
}
type wrappedListener struct {
	net.Listener
	stopped bool
	server  *Server
}

func newWrappedListener(l net.Listener, srv *Server) *wrappedListener {
	return &wrappedListener{
		Listener: l,
		server:   srv,
	}
}

func (wl *wrappedListener) Accept() (net.Conn, error) {
	var c net.Conn
	// Set keepalive on TCPListeners connections.
	if tcl, ok := wl.Listener.(*net.TCPListener); ok {
		tc, err := tcl.AcceptTCP()
		if err != nil {
			return nil, err
		}
		_ = tc.SetKeepAlive(true)                  // see http.tcpKeepAliveListener
		_ = tc.SetKeepAlivePeriod(3 * time.Minute) // see http.tcpKeepAliveListener
		c = tc
	} else {
		var err error
		c, err = wl.Listener.Accept()
		if err != nil {
			return nil, err
		}
	}

	closed := int32(0)

	c = &wrappedConn{
		Conn:                 c,
		server:               wl.server,
		closed:               &closed,
		perWriteTimeout:      wl.server.PerWriteTimeout,
		perWritePerKbTimeout: wl.server.PerWritePerKbTimeout,
	}

	wl.server.wg.Add(1)
	return c, nil
}

func (wl *wrappedListener) Close() error {
	if wl.stopped {
		return syscall.EINVAL
	}

	wl.stopped = true
	return wl.Listener.Close()
}

func (wl *wrappedListener) File() (*os.File, error) {
	// returns a dup(2) - FD_CLOEXEC flag *not* set so the listening socket can be passed to child processes
	return wl.Listener.(filer).File()
}
type wrappedConn struct {
	net.Conn
	server               *Server
	closed               *int32
	deadline             time.Time
	perWriteTimeout      time.Duration
	perWritePerKbTimeout time.Duration
}

func (w *wrappedConn) Write(p []byte) (n int, err error) {
	if w.perWriteTimeout > 0 {
		minTimeout := time.Duration(len(p)/1024) * w.perWritePerKbTimeout
		minDeadline := time.Now().Add(minTimeout).Add(w.perWriteTimeout)

		w.deadline = w.deadline.Add(minTimeout)
		if minDeadline.After(w.deadline) {
			w.deadline = minDeadline
		}
		_ = w.Conn.SetWriteDeadline(w.deadline)
	}
	return w.Conn.Write(p)
}

func (w *wrappedConn) Close() error {
	if atomic.CompareAndSwapInt32(w.closed, 0, 1) {
		defer func() {
			if err := recover(); err != nil {
				select {
				case <-GetManager().IsHammer():
					// Likely deadlocked request released at hammertime
					log.Warn("Panic during connection close! %v. Likely there has been a deadlocked request which has been released by forced shutdown.", err)
				default:
					log.Error("Panic during connection close! %v", err)
				}
			}
		}()
		w.server.wg.Done()
	}

	return w.Conn.Close()
}