// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package httpcaddyfile

import (
	"encoding/json"
	"fmt"
	"reflect"
	"sort"
	"strings"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
	"github.com/mholt/certmagic"
)

func init() {
	caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}})
}

// ServerType can set up a config from an HTTP Caddyfile.
type ServerType struct {
}

// Setup makes a config from the tokens.
func (st ServerType) Setup(originalServerBlocks []caddyfile.ServerBlock,
	options map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error) {
	var warnings []caddyconfig.Warning
	gc := counter{new(int)}

	// load all the server blocks and associate them with a "pile"
	// of config values; also prohibit duplicate keys because they
	// can make a config confusing if more than one server block is
	// chosen to handle a request - we actually will make each
	// server block's route terminal so that only one will run
	sbKeys := make(map[string]struct{})
	var serverBlocks []serverBlock
	for i, sblock := range originalServerBlocks {
		for j, k := range sblock.Keys {
			if _, ok := sbKeys[k]; ok {
				return nil, warnings, fmt.Errorf("duplicate site address not allowed: '%s' in %v (site block %d, key %d)", k, sblock.Keys, i, j)
			}
			sbKeys[k] = struct{}{}
		}
		serverBlocks = append(serverBlocks, serverBlock{
			block: sblock,
			pile:  make(map[string][]ConfigValue),
		})
	}

	// apply any global options
	var err error
	serverBlocks, err = st.evaluateGlobalOptionsBlock(serverBlocks, options)
	if err != nil {
		return nil, warnings, err
	}

	for _, sb := range serverBlocks {
		// replace shorthand placeholders (which are
		// convenient when writing a Caddyfile) with
		// their actual placeholder identifiers or
		// variable names
		replacer := strings.NewReplacer(
			"{dir}", "{http.request.uri.path.dir}",
			"{file}", "{http.request.uri.path.file}",
			"{host}", "{http.request.host}",
			"{hostport}", "{http.request.hostport}",
			"{method}", "{http.request.method}",
			"{path}", "{http.request.uri.path}",
			"{query}", "{http.request.uri.query}",
			"{remote_host}", "{http.request.remote.host}",
			"{remote_port}", "{http.request.remote.port}",
			"{remote}", "{http.request.remote}",
			"{scheme}", "{http.request.scheme}",
			"{uri}", "{http.request.uri}",
			"{tls_cipher}", "{http.request.tls.cipher_suite}",
			"{tls_version}", "{http.request.tls.version}",
			"{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
			"{tls_client_issuer}", "{http.request.tls.client.issuer}",
			"{tls_client_serial}", "{http.request.tls.client.serial}",
			"{tls_client_subject}", "{http.request.tls.client.subject}",
		)
		for _, segment := range sb.block.Segments {
			for i := 0; i < len(segment); i++ {
				segment[i].Text = replacer.Replace(segment[i].Text)
			}
		}
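
		// As an illustration only (assuming a redir directive is used),
		// after this pass a Caddyfile line such as
		//
		//     redir {scheme}://{host}{uri} 308
		//
		// is seen by the directive parsers as
		//
		//     redir {http.request.scheme}://{http.request.host}{http.request.uri} 308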

		if len(sb.block.Keys) == 0 {
			return nil, warnings, fmt.Errorf("server block without any key is global configuration, and if used, it must be first")
		}

		// extract matcher definitions
		matcherDefs := make(map[string]caddy.ModuleMap)
		for _, segment := range sb.block.Segments {
			if dir := segment.Directive(); strings.HasPrefix(dir, matcherPrefix) {
				d := sb.block.DispenseDirective(dir)
				err := parseMatcherDefinitions(d, matcherDefs)
				if err != nil {
					return nil, warnings, err
				}
			}
		}

		// evaluate each directive ("segment") in this block
		for _, segment := range sb.block.Segments {
			dir := segment.Directive()

			if strings.HasPrefix(dir, matcherPrefix) {
				// matcher definitions were pre-processed
				continue
			}

			dirFunc, ok := registeredDirectives[dir]
			if !ok {
				tkn := segment[0]
				return nil, warnings, fmt.Errorf("%s:%d: unrecognized directive: %s", tkn.File, tkn.Line, dir)
			}

			results, err := dirFunc(Helper{
				Dispenser:    caddyfile.NewDispenser(segment),
				options:      options,
				warnings:     &warnings,
				matcherDefs:  matcherDefs,
				parentBlock:  sb.block,
				groupCounter: gc,
			})
			if err != nil {
				return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err)
			}
			for _, result := range results {
				result.directive = dir
				sb.pile[result.Class] = append(sb.pile[result.Class], result)
			}
		}
	}

	// map
	sbmap, err := st.mapAddressToServerBlocks(serverBlocks, options)
	if err != nil {
		return nil, warnings, err
	}

	// reduce
	pairings := st.consolidateAddrMappings(sbmap)

	// each pairing of listener addresses to list of server
	// blocks is basically a server definition
	servers, err := st.serversFromPairings(pairings, options, &warnings, gc)
	if err != nil {
		return nil, warnings, err
	}

	// now that each server is configured, make the HTTP app
	httpApp := caddyhttp.App{
		HTTPPort:   tryInt(options["http_port"], &warnings),
		HTTPSPort:  tryInt(options["https_port"], &warnings),
		DefaultSNI: tryString(options["default_sni"], &warnings),
		Servers:    servers,
	}

	// now for the TLS app! (TODO: refactor into own func)
	tlsApp := caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}
	var certLoaders []caddytls.CertificateLoader
	for _, p := range pairings {
		for i, sblock := range p.serverBlocks {
			// tls automation policies
			if mmVals, ok := sblock.pile["tls.automation_manager"]; ok {
				for _, mmVal := range mmVals {
					mm := mmVal.Value.(caddytls.ManagerMaker)
					sblockHosts, err := st.autoHTTPSHosts(sblock)
					if err != nil {
						return nil, warnings, err
					}
					if len(sblockHosts) > 0 {
						if tlsApp.Automation == nil {
							tlsApp.Automation = new(caddytls.AutomationConfig)
						}
						tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, &caddytls.AutomationPolicy{
							Hosts:         sblockHosts,
							ManagementRaw: caddyconfig.JSONModuleObject(mm, "module", mm.(caddy.Module).CaddyModule().ID.Name(), &warnings),
						})
					} else {
						warnings = append(warnings, caddyconfig.Warning{
							Message: fmt.Sprintf("Server block %d %v has no names that qualify for automatic HTTPS, so no TLS automation policy will be added.", i, sblock.block.Keys),
						})
					}
				}
			}

			// tls certificate loaders
			if clVals, ok := sblock.pile["tls.certificate_loader"]; ok {
				for _, clVal := range clVals {
					certLoaders = append(certLoaders, clVal.Value.(caddytls.CertificateLoader))
				}
			}
		}
	}

	// group certificate loaders by module name, then add to config
	if len(certLoaders) > 0 {
		loadersByName := make(map[string]caddytls.CertificateLoader)
		for _, cl := range certLoaders {
			name := caddy.GetModuleName(cl)
			// ugh... technically, we may have multiple FileLoader and FolderLoader
			// modules (because the tls directive returns one per occurrence), but
			// the config structure expects only one instance of each kind of loader
			// module, so we have to combine them... instead of enumerating each
			// possible cert loader module in a type switch, we can use reflection,
			// which works on any cert loaders that are slice types
			if reflect.TypeOf(cl).Kind() == reflect.Slice {
				combined := reflect.ValueOf(loadersByName[name])
				if !combined.IsValid() {
					combined = reflect.New(reflect.TypeOf(cl)).Elem()
				}
				clVal := reflect.ValueOf(cl)
				for i := 0; i < clVal.Len(); i++ {
					combined = reflect.Append(reflect.Value(combined), clVal.Index(i))
				}
				loadersByName[name] = combined.Interface().(caddytls.CertificateLoader)
			}
		}
		for certLoaderName, loaders := range loadersByName {
			tlsApp.CertificatesRaw[certLoaderName] = caddyconfig.JSON(loaders, &warnings)
		}
	}
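
	// Illustrative example: two separate `tls cert1.pem key1.pem` and
	// `tls cert2.pem key2.pem` lines each pile up their own slice-typed
	// FileLoader, and the reflection above appends them into a single
	// slice so the tls app gets one combined entry for that loader module
	// rather than one entry silently overwriting the other.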

	// if global ACME CA, DNS, or email were set, append a catch-all automation
	// policy that ensures they will be used if no tls directive was used
	acmeCA, hasACMECA := options["acme_ca"]
	acmeDNS, hasACMEDNS := options["acme_dns"]
	email, hasEmail := options["email"]
	if hasACMECA || hasACMEDNS || hasEmail {
		if tlsApp.Automation == nil {
			tlsApp.Automation = new(caddytls.AutomationConfig)
		}
		if !hasACMECA {
			acmeCA = ""
		}
		if !hasEmail {
			email = ""
		}
		mgr := caddytls.ACMEManagerMaker{
			CA:    acmeCA.(string),
			Email: email.(string),
		}
		if hasACMEDNS {
			provName := acmeDNS.(string)
			dnsProvModule, err := caddy.GetModule("tls.dns." + provName)
			if err != nil {
				return nil, warnings, fmt.Errorf("getting DNS provider module named '%s': %v", provName, err)
			}
			mgr.Challenges = &caddytls.ChallengesConfig{
				DNSRaw: caddyconfig.JSONModuleObject(dnsProvModule.New(), "provider", provName, &warnings),
			}
		}
		tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, &caddytls.AutomationPolicy{
			ManagementRaw: caddyconfig.JSONModuleObject(mgr, "module", "acme", &warnings),
		})
	}
	if tlsApp.Automation != nil {
		// consolidate automation policies that are the exact same
		tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies)
	}
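
	// For example (illustrative only), a Caddyfile that opens with a global
	// options block such as
	//
	//     {
	//         email admin@example.com
	//     }
	//
	// ends up with a host-less (catch-all) ACME automation policy here, and
	// consolidateAutomationPolicies keeps that catch-all ordered after any
	// host-specific policies.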

	// if experimental HTTP/3 is enabled, enable it on each server
	if enableH3, ok := options["experimental_http3"].(bool); ok && enableH3 {
		for _, srv := range httpApp.Servers {
			srv.ExperimentalHTTP3 = true
		}
	}

	// extract any custom logs, and enforce configured levels
	var customLogs []namedCustomLog
	var hasDefaultLog bool
	for _, sb := range serverBlocks {
		for _, clVal := range sb.pile["custom_log"] {
			ncl := clVal.Value.(namedCustomLog)
			if ncl.name == "" {
				continue
			}
			if ncl.name == "default" {
				hasDefaultLog = true
			}
			if _, ok := options["debug"]; ok && ncl.log.Level == "" {
				ncl.log.Level = "DEBUG"
			}
			customLogs = append(customLogs, ncl)
		}
	}
	if !hasDefaultLog {
		// if the default log was not customized, ensure we
		// configure it with any applicable options
		if _, ok := options["debug"]; ok {
			customLogs = append(customLogs, namedCustomLog{
				name: "default",
				log:  &caddy.CustomLog{Level: "DEBUG"},
			})
		}
	}

	// annnd the top-level config, then we're done!
	cfg := &caddy.Config{AppsRaw: make(caddy.ModuleMap)}
	if !reflect.DeepEqual(httpApp, caddyhttp.App{}) {
		cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings)
	}
	if !reflect.DeepEqual(tlsApp, caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}) {
		cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings)
	}

	if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok {
		cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr,
			"module",
			storageCvtr.(caddy.Module).CaddyModule().ID.Name(),
			&warnings)
	}

	if adminConfig, ok := options["admin"].(string); ok && adminConfig != "" {
		if adminConfig == "off" {
			cfg.Admin = &caddy.AdminConfig{Disabled: true}
		} else {
			cfg.Admin = &caddy.AdminConfig{Listen: adminConfig}
		}
	}

	if len(customLogs) > 0 {
		if cfg.Logging == nil {
			cfg.Logging = &caddy.Logging{
				Logs: make(map[string]*caddy.CustomLog),
			}
		}
		for _, ncl := range customLogs {
			if ncl.name != "" {
				cfg.Logging.Logs[ncl.name] = ncl.log
			}
		}
	}

	return cfg, warnings, nil
}

// evaluateGlobalOptionsBlock evaluates the global options block,
// which is expected to be the first server block if it has zero
// keys. It returns the updated list of server blocks with the
// global options block removed, and updates options accordingly.
func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]interface{}) ([]serverBlock, error) {
	if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 {
		return serverBlocks, nil
	}

	for _, segment := range serverBlocks[0].block.Segments {
		dir := segment.Directive()
		var val interface{}
		var err error
		disp := caddyfile.NewDispenser(segment)
		// TODO: make this switch into a map
		switch dir {
		case "http_port":
			val, err = parseOptHTTPPort(disp)
		case "https_port":
			val, err = parseOptHTTPSPort(disp)
		case "default_sni":
			val, err = parseOptSingleString(disp)
		case "order":
			val, err = parseOptOrder(disp)
		case "experimental_http3":
			val, err = parseOptExperimentalHTTP3(disp)
		case "storage":
			val, err = parseOptStorage(disp)
		case "acme_ca", "acme_dns", "acme_ca_root":
			val, err = parseOptSingleString(disp)
		case "email":
			val, err = parseOptSingleString(disp)
		case "admin":
			val, err = parseOptAdmin(disp)
		case "debug":
			options["debug"] = true
		default:
			return nil, fmt.Errorf("unrecognized parameter name: %s", dir)
		}
		if err != nil {
			return nil, fmt.Errorf("%s: %v", dir, err)
		}
		options[dir] = val
	}

	return serverBlocks[1:], nil
}
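
// A global options block, shown here purely for illustration, might look like:
//
//     {
//         http_port  8080
//         https_port 8443
//         email      admin@example.com
//         debug
//     }
//
// It must be the first block in the Caddyfile and must have no keys.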

// hostsFromServerBlockKeys returns a list of all the
// hostnames found in the keys of the server block sb.
// The list may not be in a consistent order.
func (st *ServerType) hostsFromServerBlockKeys(sb caddyfile.ServerBlock) ([]string, error) {
	// first get each unique hostname
	hostMap := make(map[string]struct{})
	for _, sblockKey := range sb.Keys {
		addr, err := ParseAddress(sblockKey)
		if err != nil {
			return nil, fmt.Errorf("parsing server block key: %v", err)
		}
		addr = addr.Normalize()
		if addr.Host == "" {
			continue
		}
		hostMap[addr.Host] = struct{}{}
	}

	// convert map to slice
	sblockHosts := make([]string, 0, len(hostMap))
	for host := range hostMap {
		sblockHosts = append(sblockHosts, host)
	}

	return sblockHosts, nil
}

// serversFromPairings creates the servers for each pairing of addresses
// to server blocks. Each pairing is essentially a server definition.
func (st *ServerType) serversFromPairings(
	pairings []sbAddrAssociation,
	options map[string]interface{},
	warnings *[]caddyconfig.Warning,
	groupCounter counter,
) (map[string]*caddyhttp.Server, error) {
	servers := make(map[string]*caddyhttp.Server)

	for i, p := range pairings {
		srv := &caddyhttp.Server{
			Listen: p.addresses,
		}

		// sort server blocks by their keys; this is important because
		// only the first matching site should be evaluated, and we should
		// attempt to match the most specific site first (host and path), in
		// case their matchers overlap; we do this somewhat naively by
		// a descending sort by specificity of host, then path
		sort.SliceStable(p.serverBlocks, func(i, j int) bool {
			// TODO: we could pre-process the specificities for efficiency,
			// but I don't expect many blocks will have SO many keys...
			var iLongestPath, jLongestPath string
			var iLongestHost, jLongestHost string
			for _, key := range p.serverBlocks[i].block.Keys {
				addr, _ := ParseAddress(key)
				if specificity(addr.Host) > specificity(iLongestHost) {
					iLongestHost = addr.Host
				}
				if specificity(addr.Path) > specificity(iLongestPath) {
					iLongestPath = addr.Path
				}
			}
			for _, key := range p.serverBlocks[j].block.Keys {
				addr, _ := ParseAddress(key)
				if specificity(addr.Host) > specificity(jLongestHost) {
					jLongestHost = addr.Host
				}
				if specificity(addr.Path) > specificity(jLongestPath) {
					jLongestPath = addr.Path
				}
			}
			if specificity(iLongestHost) == specificity(jLongestHost) {
				return len(iLongestPath) > len(jLongestPath)
			}
			return specificity(iLongestHost) > specificity(jLongestHost)
		})
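
		// For instance (purely illustrative), site blocks keyed by
		// "a.example.com", "*.example.com", and ":443" end up in exactly
		// that order, because specificity("a.example.com") >
		// specificity("*.example.com") > specificity("") for the
		// host-less ":443" key.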

		var hasCatchAllTLSConnPolicy bool

		// create a subroute for each site in the server block
		for _, sblock := range p.serverBlocks {
			matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock.block)
			if err != nil {
				return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.block.Keys, err)
			}

			// tls: connection policies and toggle auto HTTPS
			autoHTTPSQualifiedHosts, err := st.autoHTTPSHosts(sblock)
			if err != nil {
				return nil, err
			}
			if _, ok := sblock.pile["tls.off"]; ok && len(autoHTTPSQualifiedHosts) > 0 {
				// tls off: disable TLS (and automatic HTTPS) for server block's names
				if srv.AutoHTTPS == nil {
					srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
				}
				srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, autoHTTPSQualifiedHosts...)
			} else if cpVals, ok := sblock.pile["tls.connection_policy"]; ok {
				// tls connection policies
				for _, cpVal := range cpVals {
					cp := cpVal.Value.(*caddytls.ConnectionPolicy)

					// make sure the policy covers all hostnames from the block
					hosts, err := st.hostsFromServerBlockKeys(sblock.block)
					if err != nil {
						return nil, err
					}

					// TODO: are matchers needed if every hostname of the resulting config is matched?
					if len(hosts) > 0 {
						cp.MatchersRaw = caddy.ModuleMap{
							"sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
						}
					} else {
						hasCatchAllTLSConnPolicy = true
					}

					srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
				}
				// TODO: consolidate equal conn policies
			}

			// exclude any hosts that were defined explicitly with
			// "http://" in the key from automated cert management (issue #2998)
			for _, key := range sblock.block.Keys {
				addr, err := ParseAddress(key)
				if err != nil {
					return nil, err
				}
				addr = addr.Normalize()
				if addr.Scheme == "http" {
					if srv.AutoHTTPS == nil {
						srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
					}
					if !sliceContains(srv.AutoHTTPS.Skip, addr.Host) {
						srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, addr.Host)
					}
				}
			}

			// set up each handler directive, making sure to honor directive order
			dirRoutes := sblock.pile["route"]
			siteSubroute, err := buildSubroute(dirRoutes, groupCounter)
			if err != nil {
				return nil, err
			}

			// add the site block's route(s) to the server
			srv.Routes = appendSubrouteToRouteList(srv.Routes, siteSubroute, matcherSetsEnc, p, warnings)

			// if error routes are defined, add those too
			if errorSubrouteVals, ok := sblock.pile["error_route"]; ok {
				if srv.Errors == nil {
					srv.Errors = new(caddyhttp.HTTPErrorConfig)
				}
				for _, val := range errorSubrouteVals {
					sr := val.Value.(*caddyhttp.Subroute)
					srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, sr, matcherSetsEnc, p, warnings)
				}
			}

			// add log associations
			for _, cval := range sblock.pile["custom_log"] {
				ncl := cval.Value.(namedCustomLog)
				if srv.Logs == nil {
					srv.Logs = &caddyhttp.ServerLogConfig{
						LoggerNames: make(map[string]string),
					}
				}
				hosts, err := st.hostsFromServerBlockKeys(sblock.block)
				if err != nil {
					return nil, err
				}
				for _, h := range hosts {
					if ncl.name != "" {
						srv.Logs.LoggerNames[h] = ncl.name
					}
				}
			}
		}

		// a catch-all TLS conn policy is necessary to ensure TLS can
		// be offered to all hostnames of the server; even though only
		// one policy is needed to enable TLS for the server, that
		// policy might apply to only certain TLS handshakes; but when
		// using the Caddyfile, the user would expect all handshakes to
		// at least have a matching connection policy, so here we append
		// a catch-all/default policy if there isn't one already (it's
		// important that it goes at the end) - see issue #3004:
		// https://github.com/caddyserver/caddy/issues/3004
		if len(srv.TLSConnPolicies) > 0 && !hasCatchAllTLSConnPolicy {
			srv.TLSConnPolicies = append(srv.TLSConnPolicies, new(caddytls.ConnectionPolicy))
		}

		srv.Routes = consolidateRoutes(srv.Routes)

		servers[fmt.Sprintf("srv%d", i)] = srv
	}

	return servers, nil
}

// appendSubrouteToRouteList appends the routes in subroute
// to the routeList, optionally qualified by matchers.
func appendSubrouteToRouteList(routeList caddyhttp.RouteList,
	subroute *caddyhttp.Subroute,
	matcherSetsEnc []caddy.ModuleMap,
	p sbAddrAssociation,
	warnings *[]caddyconfig.Warning) caddyhttp.RouteList {
	if len(matcherSetsEnc) == 0 && len(p.serverBlocks) == 1 {
		// no need to wrap the handlers in a subroute if this is
		// the only server block and there is no matcher for it
		routeList = append(routeList, subroute.Routes...)
	} else {
		routeList = append(routeList, caddyhttp.Route{
			MatcherSetsRaw: matcherSetsEnc,
			HandlersRaw: []json.RawMessage{
				caddyconfig.JSONModuleObject(subroute, "handler", "subroute", warnings),
			},
			Terminal: true, // only first matching site block should be evaluated
		})
	}
	return routeList
}
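
// In the wrapped case, the appended route encodes to JSON roughly like the
// following (shape shown for illustration; the exact field names come from
// caddyhttp.Route's JSON tags):
//
//     {
//         "match": [ ...matcherSetsEnc... ],
//         "handle": [ {"handler": "subroute", "routes": [ ... ]} ],
//         "terminal": true
//     }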

// buildSubroute turns the config values, which are expected to be routes,
// into a clean and orderly subroute that has all the routes within it.
func buildSubroute(routes []ConfigValue, groupCounter counter) (*caddyhttp.Subroute, error) {
	for _, val := range routes {
		if !directiveIsOrdered(val.directive) {
			return nil, fmt.Errorf("directive '%s' is not ordered, so it cannot be used here", val.directive)
		}
	}

	sortRoutes(routes)

	subroute := new(caddyhttp.Subroute)

	// some directives are mutually exclusive (only the first matching
	// instance should be evaluated); this is done by putting their
	// routes in the same group
	mutuallyExclusiveDirs := map[string]*struct {
		count     int
		groupName string
	}{
		// as a special case, group rewrite directives so that they are mutually exclusive;
		// this means that only the first matching rewrite will be evaluated, and that's
		// probably a good thing, since there should never be a need to do more than one
		// rewrite (I think?), and cascading rewrites smell bad... imagine these rewrites:
		//     rewrite /docs/json/* /docs/json/index.html
		//     rewrite /docs/*      /docs/index.html
		// (We use this on the Caddy website, or at least we did once.) The first rewrite's
		// result is also matched by the second rewrite, making the first rewrite pointless.
		// See issue #2959.
		"rewrite": {},

		// handle blocks are also mutually exclusive by definition
		"handle": {},

		// root just sets a variable, so if it was not mutually exclusive, intersecting
		// root directives would overwrite previously-matched ones; they should not cascade
		"root": {},
	}
	for meDir, info := range mutuallyExclusiveDirs {
		// see how many instances of the directive there are
		for _, r := range routes {
			if r.directive == meDir {
				info.count++
				if info.count > 1 {
					break
				}
			}
		}
		// if there is more than one, put them in a group
		if info.count > 1 {
			info.groupName = groupCounter.nextGroup()
		}
	}

	// add all the routes piled in from directives
	for _, r := range routes {
		// put this route into a group if it is mutually exclusive
		if info, ok := mutuallyExclusiveDirs[r.directive]; ok {
			route := r.Value.(caddyhttp.Route)
			route.Group = info.groupName
			r.Value = route
		}

		switch route := r.Value.(type) {
		case caddyhttp.Subroute:
			// if a route-class config value is actually a Subroute handler
			// with nothing but a list of routes, then it is the intention
			// of the directive to keep these handlers together and in this
			// same order, but not necessarily in a subroute (if it wanted
			// to keep them in a subroute, the directive would have returned
			// a route with a Subroute as its handler); this is useful to
			// keep multiple handlers/routes together and in the same order
			// so that the sorting procedure we did above doesn't reorder them
			if route.Errors != nil {
				// if error handlers are also set, this is confusing; it's
				// probably supposed to be wrapped in a Route and encoded
				// as a regular handler route... programmer error.
				panic("found subroute with more than just routes; perhaps it should have been wrapped in a route?")
			}
			subroute.Routes = append(subroute.Routes, route.Routes...)
		case caddyhttp.Route:
			subroute.Routes = append(subroute.Routes, route)
		}
	}

	subroute.Routes = consolidateRoutes(subroute.Routes)

	return subroute, nil
}

func (st ServerType) autoHTTPSHosts(sb serverBlock) ([]string, error) {
	// get the hosts for this server block...
	hosts, err := st.hostsFromServerBlockKeys(sb.block)
	if err != nil {
		return nil, err
	}

	// ...and of those, which ones qualify for auto HTTPS
	var autoHTTPSQualifiedHosts []string
	for _, h := range hosts {
		if certmagic.HostQualifies(h) {
			autoHTTPSQualifiedHosts = append(autoHTTPSQualifiedHosts, h)
		}
	}

	return autoHTTPSQualifiedHosts, nil
}

// consolidateRoutes combines routes with the same properties
// (same matchers, same Terminal and Group settings) for a
// cleaner overall output.
func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
	for i := 0; i < len(routes)-1; i++ {
		if reflect.DeepEqual(routes[i].MatcherSetsRaw, routes[i+1].MatcherSetsRaw) &&
			routes[i].Terminal == routes[i+1].Terminal &&
			routes[i].Group == routes[i+1].Group {
			// keep the handlers in the same order, then splice out repetitive route
			routes[i].HandlersRaw = append(routes[i].HandlersRaw, routes[i+1].HandlersRaw...)
			routes = append(routes[:i+1], routes[i+2:]...)
			i--
		}
	}
	return routes
}
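
// To illustrate: two adjacent routes that share identical matcher sets and
// the same Terminal and Group values collapse into one route whose
// HandlersRaw is the concatenation of both handler lists, in order.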

// consolidateAutomationPolicies combines automation policies that are the same,
// for a cleaner overall output.
func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls.AutomationPolicy {
	for i := 0; i < len(aps); i++ {
		for j := 0; j < len(aps); j++ {
			if j == i {
				continue
			}

			// if they're exactly equal in every way, just keep one of them
			if reflect.DeepEqual(aps[i], aps[j]) {
				aps = append(aps[:j], aps[j+1:]...)
				i--
				break
			}

			// if the policy is the same, we can keep just one, but we have
			// to be careful which one we keep; if only one has any hostnames
			// defined, then we need to keep the one without any hostnames,
			// otherwise the one without any hosts (a catch-all) would be
			// eaten up by the one with hosts; and if both have hosts, we
			// need to combine their lists
			if reflect.DeepEqual(aps[i].ManagementRaw, aps[j].ManagementRaw) &&
				aps[i].ManageSync == aps[j].ManageSync {
				if len(aps[i].Hosts) == 0 && len(aps[j].Hosts) > 0 {
					aps = append(aps[:j], aps[j+1:]...)
				} else if len(aps[i].Hosts) > 0 && len(aps[j].Hosts) == 0 {
					aps = append(aps[:i], aps[i+1:]...)
				} else {
					aps[i].Hosts = append(aps[i].Hosts, aps[j].Hosts...)
					aps = append(aps[:j], aps[j+1:]...)
				}
				i--
				break
			}
		}
	}

	// ensure any catch-all policies go last
	sort.SliceStable(aps, func(i, j int) bool {
		return len(aps[i].Hosts) > len(aps[j].Hosts)
	})

	return aps
}

func matcherSetFromMatcherToken(
	tkn caddyfile.Token,
	matcherDefs map[string]caddy.ModuleMap,
	warnings *[]caddyconfig.Warning,
) (caddy.ModuleMap, bool, error) {
	// matcher tokens can be wildcards, simple path matchers,
	// or refer to a pre-defined matcher by some name
	if tkn.Text == "*" {
		// match all requests == no matchers, so nothing to do
		return nil, true, nil
	} else if strings.HasPrefix(tkn.Text, "/") {
		// convenient way to specify a single path match
		return caddy.ModuleMap{
			"path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings),
		}, true, nil
	} else if strings.HasPrefix(tkn.Text, matcherPrefix) {
		// pre-defined matcher
		m, ok := matcherDefs[tkn.Text]
		if !ok {
			return nil, false, fmt.Errorf("unrecognized matcher name: %+v", tkn.Text)
		}
		return m, true, nil
	}
	return nil, false, nil
}

func (st *ServerType) compileEncodedMatcherSets(sblock caddyfile.ServerBlock) ([]caddy.ModuleMap, error) {
	type hostPathPair struct {
		hostm caddyhttp.MatchHost
		pathm caddyhttp.MatchPath
	}

	// keep routes with common host and path matchers together
	var matcherPairs []*hostPathPair

	for _, key := range sblock.Keys {
		addr, err := ParseAddress(key)
		if err != nil {
			return nil, fmt.Errorf("server block %v: parsing and standardizing address '%s': %v", sblock.Keys, key, err)
		}
		addr = addr.Normalize()

		// choose a matcher pair that should be shared by this
		// server block; if none exists yet, create one
		var chosenMatcherPair *hostPathPair
		for _, mp := range matcherPairs {
			if (len(mp.pathm) == 0 && addr.Path == "") ||
				(len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) {
				chosenMatcherPair = mp
				break
			}
		}
		if chosenMatcherPair == nil {
			chosenMatcherPair = new(hostPathPair)
			if addr.Path != "" {
				chosenMatcherPair.pathm = []string{addr.Path}
			}
			matcherPairs = append(matcherPairs, chosenMatcherPair)
		}

		// add this server block's keys to the matcher
		// pair if it doesn't already exist
		if addr.Host != "" {
			var found bool
			for _, h := range chosenMatcherPair.hostm {
				if h == addr.Host {
					found = true
					break
				}
			}
			if !found {
				chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
			}
		}
	}

	// iterate each pairing of host and path matchers and
	// put them into a map for JSON encoding
	var matcherSets []map[string]caddyhttp.RequestMatcher
	for _, mp := range matcherPairs {
		matcherSet := make(map[string]caddyhttp.RequestMatcher)
		if len(mp.hostm) > 0 {
			matcherSet["host"] = mp.hostm
		}
		if len(mp.pathm) > 0 {
			matcherSet["path"] = mp.pathm
		}
		if len(matcherSet) > 0 {
			matcherSets = append(matcherSets, matcherSet)
		}
	}

	// finally, encode each of the matcher sets
	var matcherSetsEnc []caddy.ModuleMap
	for _, ms := range matcherSets {
		msEncoded, err := encodeMatcherSet(ms)
		if err != nil {
			return nil, fmt.Errorf("server block %v: %v", sblock.Keys, err)
		}
		matcherSetsEnc = append(matcherSetsEnc, msEncoded)
	}

	return matcherSetsEnc, nil
}

func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
	for d.Next() {
		definitionName := d.Val()

		if _, ok := matchers[definitionName]; ok {
			return fmt.Errorf("matcher is defined more than once: %s", definitionName)
		}
		matchers[definitionName] = make(caddy.ModuleMap)

		// in case there are multiple instances of the same matcher, concatenate
		// their tokens (we expect that UnmarshalCaddyfile should be able to
		// handle more than one segment); otherwise, we'd overwrite other
		// instances of the matcher in this set
		tokensByMatcherName := make(map[string][]caddyfile.Token)
		for nesting := d.Nesting(); d.NextBlock(nesting); {
			matcherName := d.Val()
			tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
		}
		for matcherName, tokens := range tokensByMatcherName {
			mod, err := caddy.GetModule("http.matchers." + matcherName)
			if err != nil {
				return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
			}
			unm, ok := mod.New().(caddyfile.Unmarshaler)
			if !ok {
				return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
			}
			err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
			if err != nil {
				return err
			}
			rm, ok := unm.(caddyhttp.RequestMatcher)
			if !ok {
				return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
			}
			matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
		}
	}
	return nil
}
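
// For reference, the kind of Caddyfile input this parses (illustrative only)
// is a named matcher definition such as:
//
//     @api {
//         path /api/*
//         method GET POST
//     }
//
// where "path" and "method" resolve to the http.matchers.path and
// http.matchers.method modules, respectively.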

func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (caddy.ModuleMap, error) {
	msEncoded := make(caddy.ModuleMap)
	for matcherName, val := range matchers {
		jsonBytes, err := json.Marshal(val)
		if err != nil {
			return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err)
		}
		msEncoded[matcherName] = jsonBytes
	}
	return msEncoded, nil
}

// tryInt tries to convert val to an integer. If it fails,
// it downgrades the error to a warning and returns 0.
func tryInt(val interface{}, warnings *[]caddyconfig.Warning) int {
	intVal, ok := val.(int)
	if val != nil && !ok && warnings != nil {
		*warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"})
	}
	return intVal
}

// tryString is like tryInt, but for strings: if val is not a string,
// it downgrades the error to a warning and returns the empty string.
func tryString(val interface{}, warnings *[]caddyconfig.Warning) string {
	stringVal, ok := val.(string)
	if val != nil && !ok && warnings != nil {
		*warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"})
	}
	return stringVal
}

// sliceContains returns true if needle is in haystack.
func sliceContains(haystack []string, needle string) bool {
	for _, s := range haystack {
		if s == needle {
			return true
		}
	}
	return false
}

// specificity returns len(s) minus any wildcards (*) and
// placeholders ({...}). Basically, it's a length count
// that penalizes the use of wildcards and placeholders.
// This is useful for comparing hostnames and paths.
// However, wildcards in paths are not a sure answer to
// the question of specificity. For example,
// '*.example.com' is clearly less specific than
// 'a.example.com', but is '/a' more or less specific
// than '/a*'?
func specificity(s string) int {
	l := len(s) - strings.Count(s, "*")
	for len(s) > 0 {
		start := strings.Index(s, "{")
		if start < 0 {
			return l
		}
		end := strings.Index(s[start:], "}") + start + 1
		if end <= start {
			return l
		}
		l -= end - start
		s = s[end:]
	}
	return l
}
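
// A few sample values, for illustration:
//
//     specificity("a.example.com")  == 13  // plain length
//     specificity("*.example.com")  == 12  // the wildcard is not counted
//     specificity("{host}.example") == 8   // the {host} placeholder is not counted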

type counter struct {
	n *int
}

func (c counter) nextGroup() string {
	name := fmt.Sprintf("group%d", *c.n)
	*c.n++
	return name
}

type namedCustomLog struct {
	name string
	log  *caddy.CustomLog
}

// sbAddrAssociation is a mapping from a list of
// addresses to a list of server blocks that are
// served on those addresses.
type sbAddrAssociation struct {
	addresses    []string
	serverBlocks []serverBlock
}

const matcherPrefix = "@"

// Interface guard
var _ caddyfile.ServerType = (*ServerType)(nil)