caddy/caddytls/maintain.go
Matthew Holt 0e34c7c970
tls: Fix background certificate renewals that use TLS-SNI challenge
The loop which performs renewals in the background obtains a read lock
on the certificate cache map so that it can be iterated safely. Before
this fix, it also performed the renewals while still holding that read
lock. This had been fine, except that the TLS-SNI challenge, when
invoked after Caddy has already started, requires adding a certificate
to the cache, and doing so requires an exclusive write lock. The write
lock can never be obtained, because the read lock is still held higher
in the stack while the loop iterates. In other words, it's a deadlock.

I was able to reproduce this issue consistently locally, after jumping
through many hoops to force a renewal in a short time frame that
bypasses Let's Encrypt's authz caching. I was also able to verify that
by queuing renewals (as we already do for deletions and OCSP updates),
lock contention is relieved and the deadlock is avoided.

This only affects background renewals where the TLS-SNI(-01) challenge
is used. Users report seeing strange errors in the logs after this
happens ("tls: client offered an unsupported, maximum protocol version
of 301"), but I was not able to reproduce these locally. I was also not
able to reproduce the leak of sockets left in CLOSE_WAIT. I am not sure
whether those are symptoms of running in production on Linux that are
related to this bug, or not.

Either way, this is an important fix. I do not yet know the ripple
effects this will have on other symptoms we've been chasing. But it
definitely resolves a deadlock during renewals.
2017-01-21 14:39:36 -07:00
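
To make the deadlock concrete, here is a minimal, self-contained sketch of the
queue-then-renew pattern this commit moves to. The names certCacheMu, certCache,
and renewOne are illustrative stand-ins that only loosely mirror the real ones
in maintain.go below; this is not Caddy's actual renewal code.

package main

import (
	"fmt"
	"sync"
)

var (
	certCacheMu sync.RWMutex
	certCache   = map[string]string{"example.com": "old cert"}
)

// renewOne simulates a renewal that must insert a certificate into the
// cache (as the TLS-SNI challenge does), which needs the write lock.
func renewOne(name string) {
	certCacheMu.Lock()
	certCache[name] = "renewed cert"
	certCacheMu.Unlock()
}

func main() {
	// Pre-fix (broken) pattern: calling renewOne from inside the loop
	// below, while the read lock is still held, would block forever,
	// because Lock() waits for every reader to release -- including
	// the reader in this very goroutine.
	var renewQueue []string
	certCacheMu.RLock()
	for name := range certCache {
		renewQueue = append(renewQueue, name) // only queue while reading
	}
	certCacheMu.RUnlock()

	// Post-fix pattern: renew only after the read lock is released, so
	// the write lock inside renewOne can actually be acquired.
	for _, name := range renewQueue {
		renewOne(name)
	}
	fmt.Println(certCache) // map[example.com:renewed cert]
}

The same queue-then-apply approach is already used in this file for cache
deletions and OCSP staple updates.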

package caddytls

import (
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"time"

	"github.com/mholt/caddy"
	"golang.org/x/crypto/ocsp"
)

func init() {
	// maintain assets while this package is imported, which is
	// always. we don't ever stop it, since we need it running.
	go maintainAssets(make(chan struct{}))
}

const (
	// RenewInterval is how often to check certificates for renewal.
	RenewInterval = 12 * time.Hour

	// RenewDurationBefore is how long before expiration to renew certificates.
	RenewDurationBefore = (24 * time.Hour) * 30

	// OCSPInterval is how often to check if OCSP stapling needs updating.
	OCSPInterval = 1 * time.Hour
)

// maintainAssets is a permanently-blocking function
// that loops indefinitely and, on a regular schedule, checks
// certificates for expiration and initiates a renewal of certs
// that are expiring soon. It also updates OCSP stapling and
// performs other maintenance of assets. It should only be
// called once per process.
//
// You must pass in the channel which you'll close when
// maintenance should stop, to allow this goroutine to clean up
// after itself and unblock. (Not that you HAVE to stop it...)
func maintainAssets(stopChan chan struct{}) {
	renewalTicker := time.NewTicker(RenewInterval)
	ocspTicker := time.NewTicker(OCSPInterval)

	for {
		select {
		case <-renewalTicker.C:
			log.Println("[INFO] Scanning for expiring certificates")
			RenewManagedCertificates(false)
			log.Println("[INFO] Done checking certificates")
		case <-ocspTicker.C:
			log.Println("[INFO] Scanning for stale OCSP staples")
			UpdateOCSPStaples()
			DeleteOldStapleFiles()
			log.Println("[INFO] Done checking OCSP staples")
		case <-stopChan:
			renewalTicker.Stop()
			ocspTicker.Stop()
			log.Println("[INFO] Stopped background maintenance routine")
			return
		}
	}
}

// RenewManagedCertificates renews managed certificates.
func RenewManagedCertificates(allowPrompts bool) (err error) {
	var renewQueue, deleteQueue []Certificate
	visitedNames := make(map[string]struct{})

	certCacheMu.RLock()
	for name, cert := range certCache {
		if !cert.Config.Managed || cert.Config.SelfSigned {
			continue
		}

		// the list of names on this cert should never be empty...
		if cert.Names == nil || len(cert.Names) == 0 {
			log.Printf("[WARNING] Certificate keyed by '%s' has no names: %v - removing from cache", name, cert.Names)
			deleteQueue = append(deleteQueue, cert)
			continue
		}

		// skip names whose certificate we've already renewed
		if _, ok := visitedNames[name]; ok {
			continue
		}
		for _, name := range cert.Names {
			visitedNames[name] = struct{}{}
		}

		// if its time is up or ending soon, we need to try to renew it
		timeLeft := cert.NotAfter.Sub(time.Now().UTC())
		if timeLeft < RenewDurationBefore {
			log.Printf("[INFO] Certificate for %v expires in %v; attempting renewal", cert.Names, timeLeft)

			if cert.Config == nil {
				log.Printf("[ERROR] %s: No associated TLS config; unable to renew", name)
				continue
			}

			// queue for renewal when we aren't in a read lock anymore
			// (the TLS-SNI challenge will need a write lock in order to
			// present the certificate, so we renew outside of read lock)
			renewQueue = append(renewQueue, cert)
		}
	}
	certCacheMu.RUnlock()

	// Perform renewals that are queued
	for _, cert := range renewQueue {
		// Get the name which we should use to renew this certificate;
		// we only support managing certificates with one name per cert,
		// so this should be easy. We can't rely on cert.Config.Hostname
		// because it may be a wildcard value from the Caddyfile (e.g.
		// *.something.com) which, as of Jan. 2017, is not supported by ACME.
		var renewName string
		for _, name := range cert.Names {
			if name != "" {
				renewName = name
				break
			}
		}

		// perform renewal
		err := cert.Config.RenewCert(renewName, allowPrompts)
		if err != nil {
			if allowPrompts && cert.NotAfter.Sub(time.Now().UTC()) < 0 {
				// Certificate renewal failed, the operator is present, and the certificate
				// is already expired; we should stop immediately and return the error. Note
				// that we used to do this any time a renewal failed at startup. However,
				// after discussion in https://github.com/mholt/caddy/issues/642 we decided to
				// only stop startup if the certificate is expired. We still log the error
				// otherwise. I'm not sure how permanent the change in #642 will be...
				// TODO: Get rid of the expiration check... always break on error.
				return err
			}
			log.Printf("[ERROR] %v", err)
			if cert.Config.OnDemand {
				deleteQueue = append(deleteQueue, cert)
			}
		} else {
			// successful renewal, so update in-memory cache by loading
			// renewed certificate so it will be used with handshakes
			// TODO: Not until CA has valid OCSP response ready for the new cert... sigh.
			if cert.Names[len(cert.Names)-1] == "" {
				// Special case: This is the default certificate. We must
				// flush it out of the cache so that we no longer point to
				// the old, un-renewed certificate. Otherwise it will be
				// renewed on every scan, which is too often. The next cert
				// to be cached (probably this one) will become the default.
				certCacheMu.Lock()
				delete(certCache, "")
				certCacheMu.Unlock()
			}
			_, err := CacheManagedCertificate(cert.Names[0], cert.Config)
			if err != nil {
				if allowPrompts {
					return err // operator is present, so report error immediately
				}
				log.Printf("[ERROR] %v", err)
			}
		}
	}

	// Apply queued deletion changes to the cache
	for _, cert := range deleteQueue {
		certCacheMu.Lock()
		for _, name := range cert.Names {
			delete(certCache, name)
		}
		certCacheMu.Unlock()
	}

	return nil
}

// UpdateOCSPStaples updates the OCSP stapling in all
// eligible, cached certificates.
//
// OCSP maintenance strives to abide the relevant points on
// Ryan Sleevi's recommendations for good OCSP support:
// https://gist.github.com/sleevi/5efe9ef98961ecfb4da8
func UpdateOCSPStaples() {
	// Create a temporary place to store updates
	// until we release the potentially long-lived
	// read lock and use a short-lived write lock.
	type ocspUpdate struct {
		rawBytes []byte
		parsed   *ocsp.Response
	}
	updated := make(map[string]ocspUpdate)

	// A single SAN certificate maps to multiple names, so we use this
	// set to make sure we don't waste cycles checking OCSP for the same
	// certificate multiple times.
	visited := make(map[string]struct{})

	certCacheMu.RLock()
	for name, cert := range certCache {
		// skip this certificate if we've already visited it,
		// and if not, mark all the names as visited
		if _, ok := visited[name]; ok {
			continue
		}
		for _, n := range cert.Names {
			visited[n] = struct{}{}
		}

		// no point in updating OCSP for expired certificates
		if time.Now().After(cert.NotAfter) {
			continue
		}

		var lastNextUpdate time.Time
		if cert.OCSP != nil {
			lastNextUpdate = cert.OCSP.NextUpdate
			if freshOCSP(cert.OCSP) {
				// no need to update staple if ours is still fresh
				continue
			}
		}

		err := stapleOCSP(&cert, nil)
		if err != nil {
			if cert.OCSP != nil {
				// if there was no staple before, that's fine; otherwise we should log the error
				log.Printf("[ERROR] Checking OCSP: %v", err)
			}
			continue
		}

		// By this point, we've obtained the latest OCSP response.
		// If there was no staple before, or if the response is updated, make
		// sure we apply the update to all names on the certificate.
		if lastNextUpdate.IsZero() || lastNextUpdate != cert.OCSP.NextUpdate {
			log.Printf("[INFO] Advancing OCSP staple for %v from %s to %s",
				cert.Names, lastNextUpdate, cert.OCSP.NextUpdate)
			for _, n := range cert.Names {
				updated[n] = ocspUpdate{rawBytes: cert.Certificate.OCSPStaple, parsed: cert.OCSP}
			}
		}
	}
	certCacheMu.RUnlock()

	// This write lock should be brief since we have all the info we need now.
	certCacheMu.Lock()
	for name, update := range updated {
		cert := certCache[name]
		cert.OCSP = update.parsed
		cert.Certificate.OCSPStaple = update.rawBytes
		certCache[name] = cert
	}
	certCacheMu.Unlock()
}

// DeleteOldStapleFiles deletes cached OCSP staples that have expired.
// TODO: Should we do this for certificates too?
func DeleteOldStapleFiles() {
	files, err := ioutil.ReadDir(ocspFolder)
	if err != nil {
		// maybe just hasn't been created yet; no big deal
		return
	}
	for _, file := range files {
		if file.IsDir() {
			// weird, what's a folder doing inside the OCSP cache?
			continue
		}
		stapleFile := filepath.Join(ocspFolder, file.Name())
		ocspBytes, err := ioutil.ReadFile(stapleFile)
		if err != nil {
			continue
		}
		resp, err := ocsp.ParseResponse(ocspBytes, nil)
		if err != nil {
			// contents are invalid; delete it
			err = os.Remove(stapleFile)
			if err != nil {
				log.Printf("[ERROR] Purging corrupt staple file %s: %v", stapleFile, err)
			}
			// the response could not be parsed, so don't dereference it below
			continue
		}
		if time.Now().After(resp.NextUpdate) {
			// response has expired; delete it
			err = os.Remove(stapleFile)
			if err != nil {
				log.Printf("[ERROR] Purging expired staple file %s: %v", stapleFile, err)
			}
		}
	}
}

// freshOCSP returns true if resp is still fresh,
// meaning that it is not expedient to get an
// updated response from the OCSP server.
func freshOCSP(resp *ocsp.Response) bool {
	// start checking OCSP staple about halfway through validity period for good measure
	refreshTime := resp.ThisUpdate.Add(resp.NextUpdate.Sub(resp.ThisUpdate) / 2)
	return time.Now().Before(refreshTime)
}

var ocspFolder = filepath.Join(caddy.AssetsPath(), "ocsp")