From 88d3dcae42770a1b34b69d64360c71d25bfbe0fe Mon Sep 17 00:00:00 2001
From: Kris Hamoud
Date: Sun, 31 Jul 2016 02:04:54 -0700
Subject: [PATCH 01/10] added ip_hash load balancing

updated tests
fixed comment format
fixed formatting, minor logic fix
added newline to EOF
updated logic, fixed tests
added comment
updated formatting
updated test output
fixed typo
---
 caddyhttp/proxy/policy.go        |  44 +++++++++-
 caddyhttp/proxy/policy_test.go   | 143 ++++++++++++++++++++++++++++---
 caddyhttp/proxy/proxy.go         |   4 +-
 caddyhttp/proxy/proxy_test.go    |   4 +-
 caddyhttp/proxy/upstream.go      |   7 +-
 caddyhttp/proxy/upstream_test.go |  16 ++--
 6 files changed, 189 insertions(+), 29 deletions(-)

diff --git a/caddyhttp/proxy/policy.go b/caddyhttp/proxy/policy.go
index 3a11b3ce..1737c6c5 100644
--- a/caddyhttp/proxy/policy.go
+++ b/caddyhttp/proxy/policy.go
@@ -1,8 +1,11 @@
 package proxy
 
 import (
+	"hash/fnv"
 	"math"
 	"math/rand"
+	"net"
+	"net/http"
 	"sync"
 )
 
@@ -11,20 +14,21 @@ type HostPool []*UpstreamHost
 
 // Policy decides how a host will be selected from a pool.
 type Policy interface {
-	Select(pool HostPool) *UpstreamHost
+	Select(pool HostPool, r *http.Request) *UpstreamHost
 }
 
 func init() {
 	RegisterPolicy("random", func() Policy { return &Random{} })
 	RegisterPolicy("least_conn", func() Policy { return &LeastConn{} })
 	RegisterPolicy("round_robin", func() Policy { return &RoundRobin{} })
+	RegisterPolicy("ip_hash", func() Policy { return &IPHash{} })
 }
 
 // Random is a policy that selects up hosts from a pool at random.
 type Random struct{}
 
 // Select selects an up host at random from the specified pool.
-func (r *Random) Select(pool HostPool) *UpstreamHost {
+func (r *Random) Select(pool HostPool, request *http.Request) *UpstreamHost {
 
 	// Because the number of available hosts isn't known
 	// up front, the host is selected via reservoir sampling
@@ -53,7 +57,7 @@ type LeastConn struct{}
 // Select selects the up host with the least number of connections in the
 // pool. If more than one host has the same least number of connections,
 // one of the hosts is chosen at random.
-func (r *LeastConn) Select(pool HostPool) *UpstreamHost {
+func (r *LeastConn) Select(pool HostPool, request *http.Request) *UpstreamHost {
 	var bestHost *UpstreamHost
 	count := 0
 	leastConn := int64(math.MaxInt64)
@@ -86,7 +90,7 @@ type RoundRobin struct {
 }
 
 // Select selects an up host from the pool using a round robin ordering scheme.
-func (r *RoundRobin) Select(pool HostPool) *UpstreamHost {
+func (r *RoundRobin) Select(pool HostPool, request *http.Request) *UpstreamHost {
 	poolLen := uint32(len(pool))
 	r.mutex.Lock()
 	defer r.mutex.Unlock()
@@ -100,3 +104,35 @@
 	}
 	return nil
 }
+
+// IPHash is a policy that selects hosts based on hashing the request IP.
+type IPHash struct{}
+
+func hash(s string) uint32 {
+	h := fnv.New32a()
+	h.Write([]byte(s))
+	return h.Sum32()
+}
+
+// Select selects an up host from the pool based on hashing the request IP.
+func (r *IPHash) Select(pool HostPool, request *http.Request) *UpstreamHost {
+	poolLen := uint32(len(pool))
+	clientIP, _, err := net.SplitHostPort(request.RemoteAddr)
+	if err != nil {
+		clientIP = request.RemoteAddr
+	}
+	hash := hash(clientIP)
+	for {
+		if poolLen == 0 {
+			break
+		}
+		index := hash % poolLen
+		host := pool[index]
+		if host.Available() {
+			return host
+		}
+		pool = append(pool[:index], pool[index+1:]...)
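+		// the unavailable host was removed from the local copy of the pool;
+		// shrink the modulus and rehash over the remaining hosts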
+ poolLen-- + } + return nil +} diff --git a/caddyhttp/proxy/policy_test.go b/caddyhttp/proxy/policy_test.go index 8da1cadb..a736d8ca 100644 --- a/caddyhttp/proxy/policy_test.go +++ b/caddyhttp/proxy/policy_test.go @@ -21,7 +21,7 @@ func TestMain(m *testing.M) { type customPolicy struct{} -func (r *customPolicy) Select(pool HostPool) *UpstreamHost { +func (r *customPolicy) Select(pool HostPool, request *http.Request) *UpstreamHost { return pool[0] } @@ -43,37 +43,39 @@ func testPool() HostPool { func TestRoundRobinPolicy(t *testing.T) { pool := testPool() rrPolicy := &RoundRobin{} - h := rrPolicy.Select(pool) + request, _ := http.NewRequest("GET", "/", nil) + + h := rrPolicy.Select(pool, request) // First selected host is 1, because counter starts at 0 // and increments before host is selected if h != pool[1] { t.Error("Expected first round robin host to be second host in the pool.") } - h = rrPolicy.Select(pool) + h = rrPolicy.Select(pool, request) if h != pool[2] { t.Error("Expected second round robin host to be third host in the pool.") } - h = rrPolicy.Select(pool) + h = rrPolicy.Select(pool, request) if h != pool[0] { t.Error("Expected third round robin host to be first host in the pool.") } // mark host as down pool[1].Unhealthy = true - h = rrPolicy.Select(pool) + h = rrPolicy.Select(pool, request) if h != pool[2] { t.Error("Expected to skip down host.") } // mark host as up pool[1].Unhealthy = false - h = rrPolicy.Select(pool) + h = rrPolicy.Select(pool, request) if h == pool[2] { t.Error("Expected to balance evenly among healthy hosts") } // mark host as full pool[1].Conns = 1 pool[1].MaxConns = 1 - h = rrPolicy.Select(pool) + h = rrPolicy.Select(pool, request) if h != pool[2] { t.Error("Expected to skip full host.") } @@ -82,14 +84,16 @@ func TestRoundRobinPolicy(t *testing.T) { func TestLeastConnPolicy(t *testing.T) { pool := testPool() lcPolicy := &LeastConn{} + request, _ := http.NewRequest("GET", "/", nil) + pool[0].Conns = 10 pool[1].Conns = 10 - h := lcPolicy.Select(pool) + h := lcPolicy.Select(pool, request) if h != pool[2] { t.Error("Expected least connection host to be third host.") } pool[2].Conns = 100 - h = lcPolicy.Select(pool) + h = lcPolicy.Select(pool, request) if h != pool[0] && h != pool[1] { t.Error("Expected least connection host to be first or second host.") } @@ -98,8 +102,127 @@ func TestLeastConnPolicy(t *testing.T) { func TestCustomPolicy(t *testing.T) { pool := testPool() customPolicy := &customPolicy{} - h := customPolicy.Select(pool) + request, _ := http.NewRequest("GET", "/", nil) + + h := customPolicy.Select(pool, request) if h != pool[0] { t.Error("Expected custom policy host to be the first host.") } } + +func TestIPHashPolicy(t *testing.T) { + pool := testPool() + ipHash := &IPHash{} + request, _ := http.NewRequest("GET", "/", nil) + // We should be able to predict where every request is routed. 
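+	// (32-bit FNV-1a hash of the client IP, modulo the pool size, picks the host)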
+ request.RemoteAddr = "172.0.0.1:80" + h := ipHash.Select(pool, request) + if h != pool[1] { + t.Error("Expected ip hash policy host to be the second host.") + } + request.RemoteAddr = "172.0.0.2:80" + h = ipHash.Select(pool, request) + if h != pool[1] { + t.Error("Expected ip hash policy host to be the second host.") + } + request.RemoteAddr = "172.0.0.3:80" + h = ipHash.Select(pool, request) + if h != pool[2] { + t.Error("Expected ip hash policy host to be the third host.") + } + request.RemoteAddr = "172.0.0.4:80" + h = ipHash.Select(pool, request) + if h != pool[1] { + t.Error("Expected ip hash policy host to be the second host.") + } + + // we should get the same results without a port + request.RemoteAddr = "172.0.0.1" + h = ipHash.Select(pool, request) + if h != pool[1] { + t.Error("Expected ip hash policy host to be the second host.") + } + request.RemoteAddr = "172.0.0.2" + h = ipHash.Select(pool, request) + if h != pool[1] { + t.Error("Expected ip hash policy host to be the second host.") + } + request.RemoteAddr = "172.0.0.3" + h = ipHash.Select(pool, request) + if h != pool[2] { + t.Error("Expected ip hash policy host to be the third host.") + } + request.RemoteAddr = "172.0.0.4" + h = ipHash.Select(pool, request) + if h != pool[1] { + t.Error("Expected ip hash policy host to be the second host.") + } + + // we should get a healthy host if the original host is unhealthy and a + // healthy host is available + request.RemoteAddr = "172.0.0.1" + pool[1].Unhealthy = true + h = ipHash.Select(pool, request) + if h != pool[0] { + t.Error("Expected ip hash policy host to be the first host.") + } + + request.RemoteAddr = "172.0.0.2" + h = ipHash.Select(pool, request) + if h != pool[1] { + t.Error("Expected ip hash policy host to be the second host.") + } + pool[1].Unhealthy = false + + request.RemoteAddr = "172.0.0.3" + pool[2].Unhealthy = true + h = ipHash.Select(pool, request) + if h != pool[0] { + t.Error("Expected ip hash policy host to be the first host.") + } + request.RemoteAddr = "172.0.0.4" + h = ipHash.Select(pool, request) + if h != pool[0] { + t.Error("Expected ip hash policy host to be the first host.") + } + + // We should be able to resize the host pool and still be able to predict + // where a request will be routed with the same IP's used above + pool = []*UpstreamHost{ + { + Name: workableServer.URL, // this should resolve (healthcheck test) + }, + { + Name: "http://localhost:99998", // this shouldn't + }, + } + pool = HostPool(pool) + request.RemoteAddr = "172.0.0.1:80" + h = ipHash.Select(pool, request) + if h != pool[0] { + t.Error("Expected ip hash policy host to be the first host.") + } + request.RemoteAddr = "172.0.0.2:80" + h = ipHash.Select(pool, request) + if h != pool[1] { + t.Error("Expected ip hash policy host to be the second host.") + } + request.RemoteAddr = "172.0.0.3:80" + h = ipHash.Select(pool, request) + if h != pool[0] { + t.Error("Expected ip hash policy host to be the first host.") + } + request.RemoteAddr = "172.0.0.4:80" + h = ipHash.Select(pool, request) + if h != pool[1] { + t.Error("Expected ip hash policy host to be the second host.") + } + + // We should get nil when there are no healthy hosts + pool[0].Unhealthy = true + pool[1].Unhealthy = true + h = ipHash.Select(pool, request) + if h != nil { + t.Error("Expected ip hash policy host to be nil.") + } +} diff --git a/caddyhttp/proxy/proxy.go b/caddyhttp/proxy/proxy.go index 6917bcd6..5efcdb0b 100644 --- a/caddyhttp/proxy/proxy.go +++ b/caddyhttp/proxy/proxy.go @@ -27,7 +27,7 @@ type 
Upstream interface { // The path this upstream host should be routed on From() string // Selects an upstream host to be routed to. - Select() *UpstreamHost + Select(*http.Request) *UpstreamHost // Checks if subpath is not an ignored path AllowedPath(string) bool } @@ -93,7 +93,7 @@ func (p Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { // hosts until timeout (or until we get a nil host). start := time.Now() for time.Now().Sub(start) < tryDuration { - host := upstream.Select() + host := upstream.Select(r) if host == nil { return http.StatusBadGateway, errUnreachable } diff --git a/caddyhttp/proxy/proxy_test.go b/caddyhttp/proxy/proxy_test.go index de9fa53b..c62ea3c9 100644 --- a/caddyhttp/proxy/proxy_test.go +++ b/caddyhttp/proxy/proxy_test.go @@ -736,7 +736,7 @@ func (u *fakeUpstream) From() string { return u.from } -func (u *fakeUpstream) Select() *UpstreamHost { +func (u *fakeUpstream) Select(r *http.Request) *UpstreamHost { if u.host == nil { uri, err := url.Parse(u.name) if err != nil { @@ -781,7 +781,7 @@ func (u *fakeWsUpstream) From() string { return "/" } -func (u *fakeWsUpstream) Select() *UpstreamHost { +func (u *fakeWsUpstream) Select(r *http.Request) *UpstreamHost { uri, _ := url.Parse(u.name) return &UpstreamHost{ Name: u.name, diff --git a/caddyhttp/proxy/upstream.go b/caddyhttp/proxy/upstream.go index e5a40ce2..85eaa971 100644 --- a/caddyhttp/proxy/upstream.go +++ b/caddyhttp/proxy/upstream.go @@ -346,7 +346,7 @@ func (u *staticUpstream) HealthCheckWorker(stop chan struct{}) { } } -func (u *staticUpstream) Select() *UpstreamHost { +func (u *staticUpstream) Select(r *http.Request) *UpstreamHost { pool := u.Hosts if len(pool) == 1 { if !pool[0].Available() { @@ -364,11 +364,10 @@ func (u *staticUpstream) Select() *UpstreamHost { if allUnavailable { return nil } - if u.Policy == nil { - return (&Random{}).Select(pool) + return (&Random{}).Select(pool, r) } - return u.Policy.Select(pool) + return u.Policy.Select(pool, r) } func (u *staticUpstream) AllowedPath(requestPath string) bool { diff --git a/caddyhttp/proxy/upstream_test.go b/caddyhttp/proxy/upstream_test.go index 4fb990f6..1cf0e041 100644 --- a/caddyhttp/proxy/upstream_test.go +++ b/caddyhttp/proxy/upstream_test.go @@ -1,11 +1,11 @@ package proxy import ( + "github.com/mholt/caddy/caddyfile" + "net/http" "strings" "testing" "time" - - "github.com/mholt/caddy/caddyfile" ) func TestNewHost(t *testing.T) { @@ -72,14 +72,15 @@ func TestSelect(t *testing.T) { FailTimeout: 10 * time.Second, MaxFails: 1, } + r, _ := http.NewRequest("GET", "/", nil) upstream.Hosts[0].Unhealthy = true upstream.Hosts[1].Unhealthy = true upstream.Hosts[2].Unhealthy = true - if h := upstream.Select(); h != nil { + if h := upstream.Select(r); h != nil { t.Error("Expected select to return nil as all host are down") } upstream.Hosts[2].Unhealthy = false - if h := upstream.Select(); h == nil { + if h := upstream.Select(r); h == nil { t.Error("Expected select to not return nil") } upstream.Hosts[0].Conns = 1 @@ -88,11 +89,11 @@ func TestSelect(t *testing.T) { upstream.Hosts[1].MaxConns = 1 upstream.Hosts[2].Conns = 1 upstream.Hosts[2].MaxConns = 1 - if h := upstream.Select(); h != nil { + if h := upstream.Select(r); h != nil { t.Error("Expected select to return nil as all hosts are full") } upstream.Hosts[2].Conns = 0 - if h := upstream.Select(); h == nil { + if h := upstream.Select(r); h == nil { t.Error("Expected select to not return nil") } } @@ -188,6 +189,7 @@ func TestParseBlockHealthCheck(t *testing.T) { } func 
TestParseBlock(t *testing.T) { + r, _ := http.NewRequest("GET", "/", nil) tests := []struct { config string }{ @@ -207,7 +209,7 @@ func TestParseBlock(t *testing.T) { t.Error("Expected no error. Got:", err.Error()) } for _, upstream := range upstreams { - headers := upstream.Select().UpstreamHeaders + headers := upstream.Select(r).UpstreamHeaders if _, ok := headers["Host"]; !ok { t.Errorf("Test %d: Could not find the Host header", i+1) From 3d43c5b6979eeae7b3225e1cb5314cb5a5eabb89 Mon Sep 17 00:00:00 2001 From: Tw Date: Tue, 2 Aug 2016 15:28:12 +0800 Subject: [PATCH 02/10] tls: fix TestStandaloneTLSTicketKeyRotation data race ================== WARNING: DATA RACE Write at 0x00c42049d300 by goroutine 26: github.com/mholt/caddy/caddytls.standaloneTLSTicketKeyRotation() /home/tw/golib/src/github.com/mholt/caddy/caddytls/crypto.go:230 +0x698 Previous read at 0x00c42049d300 by goroutine 25: github.com/mholt/caddy/caddytls.TestStandaloneTLSTicketKeyRotation() /home/tw/golib/src/github.com/mholt/caddy/caddytls/crypto_test.go:113 +0x413 testing.tRunner() /home/tw/goroot/src/testing/testing.go:610 +0xc9 Goroutine 26 (running) created at: github.com/mholt/caddy/caddytls.TestStandaloneTLSTicketKeyRotation() /home/tw/golib/src/github.com/mholt/caddy/caddytls/crypto_test.go:101 +0x2a4 testing.tRunner() /home/tw/goroot/src/testing/testing.go:610 +0xc9 Goroutine 25 (running) created at: testing.(*T).Run() /home/tw/goroot/src/testing/testing.go:646 +0x52f testing.RunTests.func1() /home/tw/goroot/src/testing/testing.go:793 +0xb9 testing.tRunner() /home/tw/goroot/src/testing/testing.go:610 +0xc9 testing.RunTests() /home/tw/goroot/src/testing/testing.go:799 +0x4b5 testing.(*M).Run() /home/tw/goroot/src/testing/testing.go:743 +0x12f github.com/mholt/caddy/caddytls.TestMain() /home/tw/golib/src/github.com/mholt/caddy/caddytls/setup_test.go:27 +0x133 main.main() github.com/mholt/caddy/caddytls/_test/_testmain.go:116 +0x1b1 ================== ================== WARNING: DATA RACE Write at 0x00c4204aa6c0 by goroutine 26: github.com/mholt/caddy/caddytls.TestStandaloneTLSTicketKeyRotation.func2() /home/tw/golib/src/github.com/mholt/caddy/caddytls/crypto_test.go:93 +0x56 github.com/mholt/caddy/caddytls.standaloneTLSTicketKeyRotation() /home/tw/golib/src/github.com/mholt/caddy/caddytls/crypto.go:233 +0x638 Previous read at 0x00c4204aa6c0 by goroutine 25: github.com/mholt/caddy/caddytls.TestStandaloneTLSTicketKeyRotation() /home/tw/golib/src/github.com/mholt/caddy/caddytls/crypto_test.go:108 +0x391 testing.tRunner() /home/tw/goroot/src/testing/testing.go:610 +0xc9 Goroutine 26 (running) created at: github.com/mholt/caddy/caddytls.TestStandaloneTLSTicketKeyRotation() /home/tw/golib/src/github.com/mholt/caddy/caddytls/crypto_test.go:101 +0x2a4 testing.tRunner() /home/tw/goroot/src/testing/testing.go:610 +0xc9 Goroutine 25 (running) created at: testing.(*T).Run() /home/tw/goroot/src/testing/testing.go:646 +0x52f testing.RunTests.func1() /home/tw/goroot/src/testing/testing.go:793 +0xb9 testing.tRunner() /home/tw/goroot/src/testing/testing.go:610 +0xc9 testing.RunTests() /home/tw/goroot/src/testing/testing.go:799 +0x4b5 testing.(*M).Run() /home/tw/goroot/src/testing/testing.go:743 +0x12f github.com/mholt/caddy/caddytls.TestMain() /home/tw/golib/src/github.com/mholt/caddy/caddytls/setup_test.go:27 +0x133 main.main() github.com/mholt/caddy/caddytls/_test/_testmain.go:116 +0x1b1 ================== Signed-off-by: Tw --- caddytls/crypto_test.go | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff 
--git a/caddytls/crypto_test.go b/caddytls/crypto_test.go index e4697ec4..bc96bd3f 100644 --- a/caddytls/crypto_test.go +++ b/caddytls/crypto_test.go @@ -79,19 +79,22 @@ func PrivateKeyBytes(key crypto.PrivateKey) []byte { } func TestStandaloneTLSTicketKeyRotation(t *testing.T) { + type syncPkt struct { + ticketKey [32]byte + keysInUse int + } + tlsGovChan := make(chan struct{}) defer close(tlsGovChan) - callSync := make(chan bool, 1) + callSync := make(chan *syncPkt, 1) defer close(callSync) oldHook := setSessionTicketKeysTestHook defer func() { setSessionTicketKeysTestHook = oldHook }() - var keysInUse [][32]byte setSessionTicketKeysTestHook = func(keys [][32]byte) [][32]byte { - keysInUse = keys - callSync <- true + callSync <- &syncPkt{keys[0], len(keys)} return keys } @@ -104,17 +107,17 @@ func TestStandaloneTLSTicketKeyRotation(t *testing.T) { var lastTicketKey [32]byte for { select { - case <-callSync: - if lastTicketKey == keysInUse[0] { + case pkt := <-callSync: + if lastTicketKey == pkt.ticketKey { close(tlsGovChan) t.Errorf("The same TLS ticket key has been used again (not rotated): %x.", lastTicketKey) return } - lastTicketKey = keysInUse[0] + lastTicketKey = pkt.ticketKey rounds++ - if rounds <= NumTickets && len(keysInUse) != rounds { + if rounds <= NumTickets && pkt.keysInUse != rounds { close(tlsGovChan) - t.Errorf("Expected TLS ticket keys in use: %d; Got instead: %d.", rounds, len(keysInUse)) + t.Errorf("Expected TLS ticket keys in use: %d; Got instead: %d.", rounds, pkt.keysInUse) return } if c.SessionTicketsDisabled == true { From 5ac04b91bb92829711bf15499f6907593cd49728 Mon Sep 17 00:00:00 2001 From: Matthew Holt Date: Tue, 2 Aug 2016 10:55:38 -0600 Subject: [PATCH 03/10] Add -race to CI tests; use Go 1.6.3 --- .travis.yml | 4 ++-- appveyor.yml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index c121a702..0843e114 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.6.2 + - 1.6.3 - tip env: @@ -24,7 +24,7 @@ script: - diff <(echo -n) <(gofmt -s -d .) - ineffassign . - go vet ./... - - go test ./... + - go test -race ./... after_script: - golint ./... diff --git a/appveyor.yml b/appveyor.yml index 43688103..2b327fb4 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -10,8 +10,8 @@ environment: install: - rmdir c:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go1.6.2.windows-amd64.zip - - 7z x go1.6.2.windows-amd64.zip -y -oC:\ > NUL + - appveyor DownloadFile https://storage.googleapis.com/golang/go1.6.3.windows-amd64.zip + - 7z x go1.6.3.windows-amd64.zip -y -oC:\ > NUL - go version - go env - go get -t ./... @@ -23,7 +23,7 @@ build: off test_script: - go vet ./... - - go test ./... + - go test -race ./... - ineffassign . after_test: From a24e36176105d9d42166156c1147e5e0465e2064 Mon Sep 17 00:00:00 2001 From: Matthew Holt Date: Tue, 2 Aug 2016 10:59:16 -0600 Subject: [PATCH 04/10] Enable cgo for CI tests so race detector can run --- .travis.yml | 3 --- appveyor.yml | 1 - 2 files changed, 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0843e114..7f45cbed 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,9 +4,6 @@ go: - 1.6.3 - tip -env: - - CGO_ENABLED=0 - before_install: # Decrypts a script that installs an authenticated cookie # for git to use when cloning from googlesource.com. 
diff --git a/appveyor.yml b/appveyor.yml index 2b327fb4..ba913179 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,7 +6,6 @@ clone_folder: c:\gopath\src\github.com\mholt\caddy environment: GOPATH: c:\gopath - CGO_ENABLED: 0 install: - rmdir c:\go /s /q From c110b27ef53484f03509f657b1e8c8cf5e439e64 Mon Sep 17 00:00:00 2001 From: s7v7nislands Date: Wed, 3 Aug 2016 11:01:36 +0800 Subject: [PATCH 05/10] improve rlimit usage (#982) * improve rlimit usage * fix windows build * fix code style --- caddy.go | 20 -------------------- rlimit_posix.go | 23 +++++++++++++++++++++++ rlimit_windows.go | 6 ++++++ 3 files changed, 29 insertions(+), 20 deletions(-) create mode 100644 rlimit_posix.go create mode 100644 rlimit_windows.go diff --git a/caddy.go b/caddy.go index 401c6fc6..1b1caf54 100644 --- a/caddy.go +++ b/caddy.go @@ -21,8 +21,6 @@ import ( "log" "net" "os" - "os/exec" - "runtime" "strconv" "strings" "sync" @@ -725,24 +723,6 @@ func IsLoopback(addr string) bool { strings.HasPrefix(host, "127.") } -// checkFdlimit issues a warning if the OS limit for -// max file descriptors is below a recommended minimum. -func checkFdlimit() { - const min = 8192 - - // Warn if ulimit is too low for production sites - if runtime.GOOS == "linux" || runtime.GOOS == "darwin" { - out, err := exec.Command("sh", "-c", "ulimit -n").Output() // use sh because ulimit isn't in Linux $PATH - if err == nil { - lim, err := strconv.Atoi(string(bytes.TrimSpace(out))) - if err == nil && lim < min { - fmt.Printf("WARNING: File descriptor limit %d is too low for production servers. "+ - "At least %d is recommended. Fix with \"ulimit -n %d\".\n", lim, min, min) - } - } - } -} - // Upgrade re-launches the process, preserving the listeners // for a graceful restart. It does NOT load new configuration; // it only starts the process anew with a fresh binary. diff --git a/rlimit_posix.go b/rlimit_posix.go new file mode 100644 index 00000000..e6398776 --- /dev/null +++ b/rlimit_posix.go @@ -0,0 +1,23 @@ +// +build !windows + +package caddy + +import ( + "fmt" + "syscall" +) + +// checkFdlimit issues a warning if the OS limit for +// max file descriptors is below a recommended minimum. +func checkFdlimit() { + const min = 8192 + + // Warn if ulimit is too low for production sites + rlimit := &syscall.Rlimit{} + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, rlimit) + if err == nil && rlimit.Cur < min { + fmt.Printf("WARNING: File descriptor limit %d is too low for production servers. "+ + "At least %d is recommended. Fix with \"ulimit -n %d\".\n", rlimit.Cur, min, min) + } + +} diff --git a/rlimit_windows.go b/rlimit_windows.go new file mode 100644 index 00000000..0288102f --- /dev/null +++ b/rlimit_windows.go @@ -0,0 +1,6 @@ +package caddy + +// checkFdlimit issues a warning if the OS limit for +// max file descriptors is below a recommended minimum. 
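+// It is a no-op on Windows, which does not provide getrlimit.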
+func checkFdlimit() { +} From 94c63e42d6aef08064b6119af87e53140f19eacb Mon Sep 17 00:00:00 2001 From: Tw Date: Thu, 4 Aug 2016 13:01:52 +0800 Subject: [PATCH 06/10] proxy: add Host header checking fix issue #993 Signed-off-by: Tw --- caddyhttp/proxy/proxy_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/caddyhttp/proxy/proxy_test.go b/caddyhttp/proxy/proxy_test.go index c62ea3c9..3f244afb 100644 --- a/caddyhttp/proxy/proxy_test.go +++ b/caddyhttp/proxy/proxy_test.go @@ -357,9 +357,11 @@ func TestUpstreamHeadersUpdate(t *testing.T) { defer log.SetOutput(os.Stderr) var actualHeaders http.Header + var actualHost string backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("Hello, client")) actualHeaders = r.Header + actualHost = r.Host })) defer backend.Close() @@ -371,6 +373,7 @@ func TestUpstreamHeadersUpdate(t *testing.T) { "+Add-Me": {"Add-Value"}, "-Remove-Me": {""}, "Replace-Me": {"{hostname}"}, + "Host": {"{>Host}"}, } // set up proxy p := &Proxy{ @@ -385,10 +388,12 @@ func TestUpstreamHeadersUpdate(t *testing.T) { } w := httptest.NewRecorder() + const expectHost = "example.com" //add initial headers r.Header.Add("Merge-Me", "Initial") r.Header.Add("Remove-Me", "Remove-Value") r.Header.Add("Replace-Me", "Replace-Value") + r.Header.Add("Host", expectHost) p.ServeHTTP(w, r) @@ -421,6 +426,10 @@ func TestUpstreamHeadersUpdate(t *testing.T) { t.Errorf("Request sent to upstream backend should replace value of %v header with %v. Instead value was %v", headerKey, headerValue, value) } + if actualHost != expectHost { + t.Errorf("Request sent to upstream backend should have value of Host with %s, but got %s", expectHost, actualHost) + } + } func TestDownstreamHeadersUpdate(t *testing.T) { From 22a4b6cde285632a8efe5d80c2e60abbd29900d3 Mon Sep 17 00:00:00 2001 From: Daniel van Dorp Date: Fri, 5 Aug 2016 14:04:30 +0200 Subject: [PATCH 07/10] dist/init/linux-sysvinit: fix minor typo in DAEMONOPTS --- dist/init/linux-sysvinit/caddy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dist/init/linux-sysvinit/caddy b/dist/init/linux-sysvinit/caddy index 384a2725..12b03155 100644 --- a/dist/init/linux-sysvinit/caddy +++ b/dist/init/linux-sysvinit/caddy @@ -20,7 +20,7 @@ DAEMONUSER=www-data PIDFILE=/var/run/$NAME.pid LOGFILE=/var/log/$NAME.log CONFIGFILE=/etc/caddy/Caddyfile -DAEMONOPTS="-agree=true --pidfile=$PIDFILE log=$LOGFILE -conf=$CONFIGFILE" +DAEMONOPTS="-agree=true -pidfile=$PIDFILE -log=$LOGFILE -conf=$CONFIGFILE" USERBIND="$(which setcap) cap_net_bind_service=+ep" STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}" From f3a3bf6204841cb803f44e9a882796c7203946b8 Mon Sep 17 00:00:00 2001 From: Daniel van Dorp Date: Fri, 5 Aug 2016 16:15:32 +0200 Subject: [PATCH 08/10] dist/init/linux-sysvinit: improve legacy compatibility (#1002) * dist/init/linux-sysvinit: pass --oknodo for --start as well * dist/init/linux-sysvinit: manually rm PIDFILE Since start-stop-daemon --remove-pidfile is new and not present everywhere. 
---
 dist/init/linux-sysvinit/caddy | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/dist/init/linux-sysvinit/caddy b/dist/init/linux-sysvinit/caddy
index 12b03155..52d89acf 100644
--- a/dist/init/linux-sysvinit/caddy
+++ b/dist/init/linux-sysvinit/caddy
@@ -37,12 +37,13 @@ ulimit -n 8192
 
 start() {
 	$USERBIND $DAEMON
 	start-stop-daemon --start --quiet --make-pidfile --pidfile $PIDFILE \
-		--background --chuid $DAEMONUSER --exec $DAEMON -- $DAEMONOPTS
+		--background --chuid $DAEMONUSER --oknodo --exec $DAEMON -- $DAEMONOPTS
 }
 
 stop() {
-	start-stop-daemon --stop --quiet --remove-pidfile --pidfile $PIDFILE \
-		--retry=$STOP_SCHEDULE --name $NAME --oknodo
+	start-stop-daemon --stop --quiet --pidfile $PIDFILE --retry=$STOP_SCHEDULE \
+		--name $NAME --oknodo
+	rm -f $PIDFILE
 }
 
 reload() {

From 3a4f8e8d0c32f379cc3d710caca2b2b5a6937f7b Mon Sep 17 00:00:00 2001
From: Daniel van Dorp
Date: Fri, 5 Aug 2016 16:33:46 +0200
Subject: [PATCH 09/10] dist/init/linux-sysvinit: execute setcap directly

`$(which setcap)` might evaluate to nothing, and this way the error thrown
will be clearer. If setcap is not available on Debian/Ubuntu, you can
install the package `libcap2-bin`
---
 dist/init/linux-sysvinit/caddy | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dist/init/linux-sysvinit/caddy b/dist/init/linux-sysvinit/caddy
index 52d89acf..70ddd322 100644
--- a/dist/init/linux-sysvinit/caddy
+++ b/dist/init/linux-sysvinit/caddy
@@ -22,7 +22,7 @@ LOGFILE=/var/log/$NAME.log
 CONFIGFILE=/etc/caddy/Caddyfile
 DAEMONOPTS="-agree=true -pidfile=$PIDFILE -log=$LOGFILE -conf=$CONFIGFILE"
 
-USERBIND="$(which setcap) cap_net_bind_service=+ep"
+USERBIND="setcap cap_net_bind_service=+ep"
 STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
 
 test -x $DAEMON || exit 0

From 5b5e36529544209b0c9a3e389333809f47c30535 Mon Sep 17 00:00:00 2001
From: Nimi Wariboko Jr
Date: Fri, 5 Aug 2016 15:41:32 -0700
Subject: [PATCH 10/10] Instead of treating 0 as a default value, use http.DefaultMaxIdleConnsPerHost
---
 caddyhttp/proxy/proxy.go        | 2 +-
 caddyhttp/proxy/proxy_test.go   | 6 +++---
 caddyhttp/proxy/reverseproxy.go | 7 +++++--
 caddyhttp/proxy/upstream.go     | 4 ++++
 4 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/caddyhttp/proxy/proxy.go b/caddyhttp/proxy/proxy.go
index ed4383dc..d1d69541 100644
--- a/caddyhttp/proxy/proxy.go
+++ b/caddyhttp/proxy/proxy.go
@@ -108,7 +108,7 @@ func (p Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
 		if nameURL, err := url.Parse(host.Name); err == nil {
 			outreq.Host = nameURL.Host
 			if proxy == nil {
-				proxy = NewSingleHostReverseProxy(nameURL, host.WithoutPathPrefix, 0)
+				proxy = NewSingleHostReverseProxy(nameURL, host.WithoutPathPrefix, http.DefaultMaxIdleConnsPerHost)
 			}
 
 			// use upstream credentials by default
diff --git a/caddyhttp/proxy/proxy_test.go b/caddyhttp/proxy/proxy_test.go
index 866cde95..9b94e6ec 100644
--- a/caddyhttp/proxy/proxy_test.go
+++ b/caddyhttp/proxy/proxy_test.go
@@ -716,7 +716,7 @@ func newFakeUpstream(name string, insecure bool) *fakeUpstream {
 		from: "/",
 		host: &UpstreamHost{
 			Name:         name,
-			ReverseProxy: NewSingleHostReverseProxy(uri, "", 0),
+			ReverseProxy: NewSingleHostReverseProxy(uri, "", http.DefaultMaxIdleConnsPerHost),
 		},
 	}
 	if insecure {
@@ -744,7 +744,7 @@ func (u *fakeUpstream) Select() *UpstreamHost {
 		}
 		u.host = &UpstreamHost{
 			Name:         u.name,
-			ReverseProxy: NewSingleHostReverseProxy(uri, u.without, 0),
+			ReverseProxy: NewSingleHostReverseProxy(uri, u.without, http.DefaultMaxIdleConnsPerHost),
 		}
 	}
 	return
u.host @@ -785,7 +785,7 @@ func (u *fakeWsUpstream) Select() *UpstreamHost { uri, _ := url.Parse(u.name) return &UpstreamHost{ Name: u.name, - ReverseProxy: NewSingleHostReverseProxy(uri, u.without, 0), + ReverseProxy: NewSingleHostReverseProxy(uri, u.without, http.DefaultMaxIdleConnsPerHost), UpstreamHeaders: http.Header{ "Connection": {"{>Connection}"}, "Upgrade": {"{>Upgrade}"}}, diff --git a/caddyhttp/proxy/reverseproxy.go b/caddyhttp/proxy/reverseproxy.go index eadd7e3a..30ac3991 100644 --- a/caddyhttp/proxy/reverseproxy.go +++ b/caddyhttp/proxy/reverseproxy.go @@ -122,7 +122,10 @@ func NewSingleHostReverseProxy(target *url.URL, without string, keepalive int) * rp.Transport = &http.Transport{ Dial: socketDial(target.String()), } - } else if keepalive != 0 { + } else if keepalive != http.DefaultMaxIdleConnsPerHost { + // if keepalive is equal to the default, + // just use default transport, to avoid creating + // a brand new transport rp.Transport = &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ @@ -132,7 +135,7 @@ func NewSingleHostReverseProxy(target *url.URL, without string, keepalive int) * TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, } - if keepalive < 0 { + if keepalive == 0 { rp.Transport.(*http.Transport).DisableKeepAlives = true } else { rp.Transport.(*http.Transport).MaxIdleConnsPerHost = keepalive diff --git a/caddyhttp/proxy/upstream.go b/caddyhttp/proxy/upstream.go index fcbf15d9..36620995 100644 --- a/caddyhttp/proxy/upstream.go +++ b/caddyhttp/proxy/upstream.go @@ -55,6 +55,7 @@ func NewStaticUpstreams(c caddyfile.Dispenser) ([]Upstream, error) { FailTimeout: 10 * time.Second, MaxFails: 1, MaxConns: 0, + KeepAlive: http.DefaultMaxIdleConnsPerHost, } if !c.Args(&upstream.from) { @@ -321,6 +322,9 @@ func parseBlock(c *caddyfile.Dispenser, u *staticUpstream) error { if err != nil { return err } + if n < 0 { + return c.ArgErr() + } u.KeepAlive = n default: return c.Errf("unknown property '%s'", c.Val())
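
For reference, after this last patch the keepalive values accepted by the proxy directive map onto net/http transport settings as follows: the parser default is now http.DefaultMaxIdleConnsPerHost (which keeps the stock transport), 0 disables keep-alives outright, any other positive value becomes MaxIdleConnsPerHost, and negative values are rejected at parse time. The helper below is only an illustrative sketch of that mapping; transportFor is a hypothetical name, not part of the proxy package, and it omits the dialer settings used in NewSingleHostReverseProxy.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// transportFor sketches the keepalive handling introduced in this patch.
// It is a hypothetical helper for illustration, not part of the proxy package.
func transportFor(keepalive int) *http.Transport {
	if keepalive == http.DefaultMaxIdleConnsPerHost {
		// common case: reuse the stock transport instead of building a new one
		return http.DefaultTransport.(*http.Transport)
	}
	t := &http.Transport{
		Proxy:                 http.ProxyFromEnvironment,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	if keepalive == 0 {
		// "keepalive 0" now means: disable keep-alives entirely
		t.DisableKeepAlives = true
	} else {
		t.MaxIdleConnsPerHost = keepalive
	}
	return t
}

func main() {
	fmt.Println(transportFor(0).DisableKeepAlives)    // true
	fmt.Println(transportFor(64).MaxIdleConnsPerHost) // 64
}

Using the standard library's default as the sentinel keeps the common case on the shared default transport instead of allocating a new one per upstream, which is the motivation stated in the reverseproxy.go comment above.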