Implement config adapters and beginning of Caddyfile adapter

Along with several other changes, such as renaming caddyhttp.ServerRoute to caddyhttp.Route, exporting some types that were not exported before, and tweaking the caddytls TLS values to be more consistent. Notably, we also now disable automatic cert management for names which already have a cert (manually) loaded into the cache. These names no longer need to be specified in the "skip_certificates" field of the automatic HTTPS config, because they will be skipped automatically.

commit ab885f07b8 (parent 4950ce485f)
54 changed files with 4875 additions and 232 deletions
admin.go (6 changed lines: +4, -2)

@@ -96,8 +96,10 @@ func StartAdmin(initialConfigJSON []byte) error {
 	///// END PPROF STUFF //////
 
 	for _, m := range GetModules("admin") {
-		route := m.New().(AdminRoute)
-		mux.Handle(route.Pattern, route)
+		routes := m.New().([]AdminRoute)
+		for _, route := range routes {
+			mux.Handle(route.Pattern, route)
+		}
 	}
 
 	handler := cors.Default().Handler(mux)
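For context on how this hunk is consumed: each AdminRoute pairs a mux pattern with a handler and is itself passed to mux.Handle, so it must satisfy http.Handler. Below is a minimal sketch of a module value that would fit this loop; the struct definition and the /example/ping endpoint are assumptions inferred from the hunk, not code from this commit.

package adminsketch // hypothetical package, for illustration only

import "net/http"

// Assumed shape, inferred from mux.Handle(route.Pattern, route) above:
// a route names a pattern and serves HTTP itself.
type AdminRoute struct {
	Pattern string
	Handler http.Handler
}

func (ar AdminRoute) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ar.Handler.ServeHTTP(w, r)
}

// With this commit, an "admin" module's New() returns a slice of routes,
// so one module can register several admin endpoints:
func newExampleAdminRoutes() interface{} {
	return []AdminRoute{
		{
			Pattern: "/example/ping", // hypothetical endpoint
			Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.Write([]byte("pong"))
			}),
		},
	}
}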
caddyconfig/caddyfile/adapter.go (new file, 93 lines)

@@ -0,0 +1,93 @@
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyfile

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
)

// Adapter adapts Caddyfile to Caddy JSON.
type Adapter struct {
	ServerType ServerType
}

// Adapt converts the Caddyfile config in body to Caddy JSON.
func (a Adapter) Adapt(body []byte, options map[string]string) ([]byte, []caddyconfig.Warning, error) {
	if a.ServerType == nil {
		return nil, nil, fmt.Errorf("no server type")
	}
	if options == nil {
		options = make(map[string]string)
	}

	directives := a.ServerType.ValidDirectives()

	filename := options["filename"]
	if filename == "" {
		filename = "Caddyfile"
	}

	serverBlocks, err := Parse(filename, bytes.NewReader(body), directives)
	if err != nil {
		return nil, nil, err
	}

	cfg, warnings, err := a.ServerType.Setup(serverBlocks, options)
	if err != nil {
		return nil, warnings, err
	}

	marshalFunc := json.Marshal
	if options["pretty"] == "true" {
		marshalFunc = caddyconfig.JSONIndent
	}
	result, err := marshalFunc(cfg)

	return result, warnings, err
}

// Unmarshaler is a type that can unmarshal
// Caddyfile tokens to set itself up for a
// JSON encoding. The goal of an unmarshaler
// is not to set itself up for actual use,
// but to set itself up for being marshaled
// into JSON. Caddyfile-unmarshaled values
// will not be used directly; they will be
// encoded as JSON and then used from that.
type Unmarshaler interface {
	UnmarshalCaddyfile(d *Dispenser) error
}

// ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
type ServerType interface {
	// ValidDirectives returns a list of the
	// server type's recognized directives.
	ValidDirectives() []string

	// Setup takes the server blocks which
	// contain tokens, as well as options
	// (e.g. CLI flags) and creates a Caddy
	// config, along with any warnings or
	// an error.
	Setup([]ServerBlock, map[string]string) (*caddy.Config, []caddyconfig.Warning, error)
}

// Interface guard
var _ caddyconfig.Adapter = (*Adapter)(nil)
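A minimal sketch of driving the adapter above. fakeServerType stands in for a real ServerType implementation (none is assumed here), and the respond directive in the sample Caddyfile is purely illustrative; only Adapter, Adapt, and the "filename"/"pretty" options come from adapter.go.

package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

// fakeServerType is a stand-in used only to make the sketch self-contained;
// a real server type would translate the server blocks into a populated config.
type fakeServerType struct{}

func (fakeServerType) ValidDirectives() []string { return []string{"respond"} }

func (fakeServerType) Setup(blocks []caddyfile.ServerBlock, options map[string]string) (*caddy.Config, []caddyconfig.Warning, error) {
	return new(caddy.Config), nil, nil
}

func main() {
	adapter := caddyfile.Adapter{ServerType: fakeServerType{}}

	input := []byte("example.com {\n\trespond hello\n}\n")
	result, warnings, err := adapter.Adapt(input, map[string]string{"pretty": "true"})
	if err != nil {
		panic(err)
	}
	for _, w := range warnings {
		fmt.Println("warning:", w)
	}
	fmt.Println(string(result)) // JSON form of the (empty) config
}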
caddyconfig/caddyfile/dispenser.go (new executable file, 333 lines)

@@ -0,0 +1,333 @@
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyfile

import (
	"errors"
	"fmt"
	"strings"
)

// Dispenser is a type that dispenses tokens, similarly to a lexer,
// except that it can do so with some notion of structure. An empty
// Dispenser is invalid; call NewDispenser to make a proper instance.
type Dispenser struct {
	filename string
	tokens   []Token
	cursor   int
	nesting  int
}

// NewDispenser returns a Dispenser filled with the given tokens.
func NewDispenser(filename string, tokens []Token) *Dispenser {
	return &Dispenser{
		filename: filename,
		tokens:   tokens,
		cursor:   -1,
	}
}

// Next loads the next token. Returns true if a token
// was loaded; false otherwise. If false, all tokens
// have been consumed.
func (d *Dispenser) Next() bool {
	if d.cursor < len(d.tokens)-1 {
		d.cursor++
		return true
	}
	return false
}

// Prev moves to the previous token. It does the inverse
// of Next(). Generally, this should only be used in
// special cases such as deleting a token from the slice
// that d is iterating. In that case, without using Prev(),
// the dispenser would be pointing at the wrong token since
// deleting a token implicitly advances the cursor.
func (d *Dispenser) Prev() bool {
	if d.cursor > 0 {
		d.cursor--
		return true
	}
	return false
}

// NextArg loads the next token if it is on the same
// line and if it is not a block opening (open curly
// brace). Returns true if an argument token was
// loaded; false otherwise. If false, all tokens on
// the line have been consumed except for potentially
// a block opening. It handles imported tokens
// correctly.
func (d *Dispenser) NextArg() bool {
	if !d.nextOnSameLine() {
		return false
	}
	if d.Val() == "{" {
		// roll back; a block opening is not an argument
		d.cursor--
		return false
	}
	return true
}

// nextOnSameLine advances the cursor if the next
// token is on the same line of the same file.
func (d *Dispenser) nextOnSameLine() bool {
	if d.cursor < 0 {
		d.cursor++
		return true
	}
	if d.cursor >= len(d.tokens) {
		return false
	}
	if d.cursor < len(d.tokens)-1 &&
		d.tokens[d.cursor].File == d.tokens[d.cursor+1].File &&
		d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) == d.tokens[d.cursor+1].Line {
		d.cursor++
		return true
	}
	return false
}

// NextLine loads the next token only if it is not on the same
// line as the current token, and returns true if a token was
// loaded; false otherwise. If false, there is not another token
// or it is on the same line. It handles imported tokens correctly.
func (d *Dispenser) NextLine() bool {
	if d.cursor < 0 {
		d.cursor++
		return true
	}
	if d.cursor >= len(d.tokens) {
		return false
	}
	if d.cursor < len(d.tokens)-1 &&
		(d.tokens[d.cursor].File != d.tokens[d.cursor+1].File ||
			d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) < d.tokens[d.cursor+1].Line) {
		d.cursor++
		return true
	}
	return false
}

// NextBlock can be used as the condition of a for loop
// to load the next token as long as it opens a block or
// is already in a block. It returns true if a token was
// loaded, or false when the block's closing curly brace
// was loaded and thus the block ended. Nested blocks are
// not supported.
func (d *Dispenser) NextBlock() bool {
	if d.nesting > 0 {
		d.Next()
		if d.Val() == "}" {
			d.nesting--
			return false
		}
		return true
	}
	if !d.nextOnSameLine() { // block must open on same line
		return false
	}
	if d.Val() != "{" {
		d.cursor-- // roll back if not opening brace
		return false
	}
	d.Next()
	if d.Val() == "}" {
		// open and then closed right away
		return false
	}
	d.nesting++
	return true
}

// Nested returns true if the token is currently nested
// inside a block (i.e. an open curly brace was consumed).
func (d *Dispenser) Nested() bool {
	return d.nesting > 0
}

// Val gets the text of the current token. If there is no token
// loaded, it returns empty string.
func (d *Dispenser) Val() string {
	if d.cursor < 0 || d.cursor >= len(d.tokens) {
		return ""
	}
	return d.tokens[d.cursor].Text
}

// Line gets the line number of the current token. If there is no token
// loaded, it returns 0.
func (d *Dispenser) Line() int {
	if d.cursor < 0 || d.cursor >= len(d.tokens) {
		return 0
	}
	return d.tokens[d.cursor].Line
}

// File gets the filename of the current token. If there is no token loaded,
// it returns the filename originally given when parsing started.
func (d *Dispenser) File() string {
	if d.cursor < 0 || d.cursor >= len(d.tokens) {
		return d.filename
	}
	if tokenFilename := d.tokens[d.cursor].File; tokenFilename != "" {
		return tokenFilename
	}
	return d.filename
}

// Args is a convenience function that loads the next arguments
// (tokens on the same line) into an arbitrary number of strings
// pointed to in targets. If there are fewer tokens available
// than string pointers, the remaining strings will not be changed
// and false will be returned. If there were enough tokens available
// to fill the arguments, then true will be returned.
func (d *Dispenser) Args(targets ...*string) bool {
	for i := 0; i < len(targets); i++ {
		if !d.NextArg() {
			return false
		}
		*targets[i] = d.Val()
	}
	return true
}

// RemainingArgs loads any more arguments (tokens on the same line)
// into a slice and returns them. Open curly brace tokens also indicate
// the end of arguments, and the curly brace is not included in
// the return value nor is it loaded.
func (d *Dispenser) RemainingArgs() []string {
	var args []string
	for d.NextArg() {
		args = append(args, d.Val())
	}
	return args
}

// NewFromNextTokens returns a new dispenser with a copy of
// the tokens from the current token until the end of the
// "directive" whether that be to the end of the line or
// the end of a block that starts at the end of the line.
func (d *Dispenser) NewFromNextTokens() *Dispenser {
	var tkns []Token
	tkns = append(tkns, d.Token())
	for d.NextArg() {
		tkns = append(tkns, d.Token())
	}
	if d.Next() && d.Val() == "{" {
		tkns = append(tkns, d.Token())
		for d.NextBlock() {
			for d.Nested() {
				tkns = append(tkns, d.Token())
				d.NextBlock()
			}
		}
		tkns = append(tkns, d.Token())
	} else {
		d.cursor--
	}
	return NewDispenser(d.filename, tkns)
}

// Token returns the current token.
func (d *Dispenser) Token() Token {
	if d.cursor < 0 || d.cursor >= len(d.tokens) {
		return Token{}
	}
	return d.tokens[d.cursor]
}

// Cursor returns the current cursor (token index).
func (d *Dispenser) Cursor() int {
	return d.cursor
}

// ArgErr returns an argument error, meaning that another
// argument was expected but not found. In other words,
// a line break or open curly brace was encountered instead of
// an argument.
func (d *Dispenser) ArgErr() error {
	if d.Val() == "{" {
		return d.Err("Unexpected token '{', expecting argument")
	}
	return d.Errf("Wrong argument count or unexpected line ending after '%s'", d.Val())
}

// SyntaxErr creates a generic syntax error which explains what was
// found and what was expected.
func (d *Dispenser) SyntaxErr(expected string) error {
	msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', expecting '%s'", d.File(), d.Line(), d.Val(), expected)
	return errors.New(msg)
}

// EOFErr returns an error indicating that the dispenser reached
// the end of the input when searching for the next token.
func (d *Dispenser) EOFErr() error {
	return d.Errf("Unexpected EOF")
}

// Err generates a custom parse-time error with a message of msg.
func (d *Dispenser) Err(msg string) error {
	msg = fmt.Sprintf("%s:%d - Error during parsing: %s", d.File(), d.Line(), msg)
	return errors.New(msg)
}

// Errf is like Err, but for formatted error messages
func (d *Dispenser) Errf(format string, args ...interface{}) error {
	return d.Err(fmt.Sprintf(format, args...))
}

// Delete deletes the current token and returns the updated slice
// of tokens. The cursor is not advanced to the next token.
// Because deletion modifies the underlying slice, this method
// should only be called if you have access to the original slice
// of tokens and/or are using the slice of tokens outside this
// Dispenser instance. If you do not re-assign the slice with the
// return value of this method, inconsistencies in the token
// array will become apparent (or worse, hide from you like they
// did me for 3 and a half freaking hours late one night).
func (d *Dispenser) Delete() []Token {
	if d.cursor >= 0 && d.cursor < len(d.tokens)-1 {
		d.tokens = append(d.tokens[:d.cursor], d.tokens[d.cursor+1:]...)
		d.cursor--
	}
	return d.tokens
}

// numLineBreaks counts how many line breaks are in the token
// value given by the token index tknIdx. It returns 0 if the
// token does not exist or there are no line breaks.
func (d *Dispenser) numLineBreaks(tknIdx int) int {
	if tknIdx < 0 || tknIdx >= len(d.tokens) {
		return 0
	}
	return strings.Count(d.tokens[tknIdx].Text, "\n")
}

// isNewLine determines whether the current token is on a different
// line (higher line number) than the previous token. It handles imported
// tokens correctly. If there isn't a previous token, it returns true.
func (d *Dispenser) isNewLine() bool {
	if d.cursor < 1 {
		return true
	}
	if d.cursor > len(d.tokens)-1 {
		return false
	}
	return d.tokens[d.cursor-1].File != d.tokens[d.cursor].File ||
		d.tokens[d.cursor-1].Line+d.numLineBreaks(d.cursor-1) < d.tokens[d.cursor].Line
}
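How the Dispenser is meant to be used by the Unmarshaler interface declared in adapter.go: a sketch of a type that reads its own tokens. The gizmo directive, its fields, and the strconv conversion are illustrative assumptions; the Dispenser calls (Next, Args, NextBlock, Val, ArgErr, Errf) are the ones defined above.

package gizmo // hypothetical module package

import (
	"strconv"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

// Hypothetical directive syntax:  gizmo <name> { retries <n> }
type Gizmo struct {
	Name    string `json:"name,omitempty"`
	Retries int    `json:"retries,omitempty"`
}

// UnmarshalCaddyfile satisfies caddyfile.Unmarshaler: it only populates the
// struct so it can be marshaled to JSON, not set it up for actual use.
func (g *Gizmo) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	for d.Next() { // consume the directive name
		if !d.Args(&g.Name) {
			return d.ArgErr()
		}
		for d.NextBlock() {
			switch d.Val() {
			case "retries":
				var n string
				if !d.Args(&n) {
					return d.ArgErr()
				}
				retries, err := strconv.Atoi(n)
				if err != nil {
					return d.Errf("invalid retries '%s': %v", n, err)
				}
				g.Retries = retries
			default:
				return d.Errf("unrecognized subdirective '%s'", d.Val())
			}
		}
	}
	return nil
}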
caddyconfig/caddyfile/dispenser_test.go (new executable file, 316 lines)

@@ -0,0 +1,316 @@
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package caddyfile
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDispenser_Val_Next(t *testing.T) {
|
||||
input := `host:port
|
||||
dir1 arg1
|
||||
dir2 arg2 arg3
|
||||
dir3`
|
||||
d := newTestDispenser(input)
|
||||
|
||||
if val := d.Val(); val != "" {
|
||||
t.Fatalf("Val(): Should return empty string when no token loaded; got '%s'", val)
|
||||
}
|
||||
|
||||
assertNext := func(shouldLoad bool, expectedCursor int, expectedVal string) {
|
||||
if loaded := d.Next(); loaded != shouldLoad {
|
||||
t.Errorf("Next(): Expected %v but got %v instead (val '%s')", shouldLoad, loaded, d.Val())
|
||||
}
|
||||
if d.cursor != expectedCursor {
|
||||
t.Errorf("Expected cursor to be %d, but was %d", expectedCursor, d.cursor)
|
||||
}
|
||||
if d.nesting != 0 {
|
||||
t.Errorf("Nesting should be 0, was %d instead", d.nesting)
|
||||
}
|
||||
if val := d.Val(); val != expectedVal {
|
||||
t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
|
||||
}
|
||||
}
|
||||
|
||||
assertNext(true, 0, "host:port")
|
||||
assertNext(true, 1, "dir1")
|
||||
assertNext(true, 2, "arg1")
|
||||
assertNext(true, 3, "dir2")
|
||||
assertNext(true, 4, "arg2")
|
||||
assertNext(true, 5, "arg3")
|
||||
assertNext(true, 6, "dir3")
|
||||
// Note: This next test simply asserts existing behavior.
|
||||
// If desired, we may wish to empty the token value after
|
||||
// reading past the EOF. Open an issue if you want this change.
|
||||
assertNext(false, 6, "dir3")
|
||||
}
|
||||
|
||||
func TestDispenser_NextArg(t *testing.T) {
|
||||
input := `dir1 arg1
|
||||
dir2 arg2 arg3
|
||||
dir3`
|
||||
d := newTestDispenser(input)
|
||||
|
||||
assertNext := func(shouldLoad bool, expectedVal string, expectedCursor int) {
|
||||
if d.Next() != shouldLoad {
|
||||
t.Errorf("Next(): Should load token but got false instead (val: '%s')", d.Val())
|
||||
}
|
||||
if d.cursor != expectedCursor {
|
||||
t.Errorf("Next(): Expected cursor to be at %d, but it was %d", expectedCursor, d.cursor)
|
||||
}
|
||||
if val := d.Val(); val != expectedVal {
|
||||
t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
|
||||
}
|
||||
}
|
||||
|
||||
assertNextArg := func(expectedVal string, loadAnother bool, expectedCursor int) {
|
||||
if !d.NextArg() {
|
||||
t.Error("NextArg(): Should load next argument but got false instead")
|
||||
}
|
||||
if d.cursor != expectedCursor {
|
||||
t.Errorf("NextArg(): Expected cursor to be at %d, but it was %d", expectedCursor, d.cursor)
|
||||
}
|
||||
if val := d.Val(); val != expectedVal {
|
||||
t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
|
||||
}
|
||||
if !loadAnother {
|
||||
if d.NextArg() {
|
||||
t.Fatalf("NextArg(): Should NOT load another argument, but got true instead (val: '%s')", d.Val())
|
||||
}
|
||||
if d.cursor != expectedCursor {
|
||||
t.Errorf("NextArg(): Expected cursor to remain at %d, but it was %d", expectedCursor, d.cursor)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assertNext(true, "dir1", 0)
|
||||
assertNextArg("arg1", false, 1)
|
||||
assertNext(true, "dir2", 2)
|
||||
assertNextArg("arg2", true, 3)
|
||||
assertNextArg("arg3", false, 4)
|
||||
assertNext(true, "dir3", 5)
|
||||
assertNext(false, "dir3", 5)
|
||||
}
|
||||
|
||||
func TestDispenser_NextLine(t *testing.T) {
|
||||
input := `host:port
|
||||
dir1 arg1
|
||||
dir2 arg2 arg3`
|
||||
d := newTestDispenser(input)
|
||||
|
||||
assertNextLine := func(shouldLoad bool, expectedVal string, expectedCursor int) {
|
||||
if d.NextLine() != shouldLoad {
|
||||
t.Errorf("NextLine(): Should load token but got false instead (val: '%s')", d.Val())
|
||||
}
|
||||
if d.cursor != expectedCursor {
|
||||
t.Errorf("NextLine(): Expected cursor to be %d, instead was %d", expectedCursor, d.cursor)
|
||||
}
|
||||
if val := d.Val(); val != expectedVal {
|
||||
t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
|
||||
}
|
||||
}
|
||||
|
||||
assertNextLine(true, "host:port", 0)
|
||||
assertNextLine(true, "dir1", 1)
|
||||
assertNextLine(false, "dir1", 1)
|
||||
d.Next() // arg1
|
||||
assertNextLine(true, "dir2", 3)
|
||||
assertNextLine(false, "dir2", 3)
|
||||
d.Next() // arg2
|
||||
assertNextLine(false, "arg2", 4)
|
||||
d.Next() // arg3
|
||||
assertNextLine(false, "arg3", 5)
|
||||
}
|
||||
|
||||
func TestDispenser_NextBlock(t *testing.T) {
|
||||
input := `foobar1 {
|
||||
sub1 arg1
|
||||
sub2
|
||||
}
|
||||
foobar2 {
|
||||
}`
|
||||
d := newTestDispenser(input)
|
||||
|
||||
assertNextBlock := func(shouldLoad bool, expectedCursor, expectedNesting int) {
|
||||
if loaded := d.NextBlock(); loaded != shouldLoad {
|
||||
t.Errorf("NextBlock(): Should return %v but got %v", shouldLoad, loaded)
|
||||
}
|
||||
if d.cursor != expectedCursor {
|
||||
t.Errorf("NextBlock(): Expected cursor to be %d, was %d", expectedCursor, d.cursor)
|
||||
}
|
||||
if d.nesting != expectedNesting {
|
||||
t.Errorf("NextBlock(): Nesting should be %d, not %d", expectedNesting, d.nesting)
|
||||
}
|
||||
}
|
||||
|
||||
assertNextBlock(false, -1, 0)
|
||||
d.Next() // foobar1
|
||||
assertNextBlock(true, 2, 1)
|
||||
assertNextBlock(true, 3, 1)
|
||||
assertNextBlock(true, 4, 1)
|
||||
assertNextBlock(false, 5, 0)
|
||||
d.Next() // foobar2
|
||||
assertNextBlock(false, 8, 0) // empty block is as if it didn't exist
|
||||
}
|
||||
|
||||
func TestDispenser_Args(t *testing.T) {
|
||||
var s1, s2, s3 string
|
||||
input := `dir1 arg1 arg2 arg3
|
||||
dir2 arg4 arg5
|
||||
dir3 arg6 arg7
|
||||
dir4`
|
||||
d := newTestDispenser(input)
|
||||
|
||||
d.Next() // dir1
|
||||
|
||||
// As many strings as arguments
|
||||
if all := d.Args(&s1, &s2, &s3); !all {
|
||||
t.Error("Args(): Expected true, got false")
|
||||
}
|
||||
if s1 != "arg1" {
|
||||
t.Errorf("Args(): Expected s1 to be 'arg1', got '%s'", s1)
|
||||
}
|
||||
if s2 != "arg2" {
|
||||
t.Errorf("Args(): Expected s2 to be 'arg2', got '%s'", s2)
|
||||
}
|
||||
if s3 != "arg3" {
|
||||
t.Errorf("Args(): Expected s3 to be 'arg3', got '%s'", s3)
|
||||
}
|
||||
|
||||
d.Next() // dir2
|
||||
|
||||
// More strings than arguments
|
||||
if all := d.Args(&s1, &s2, &s3); all {
|
||||
t.Error("Args(): Expected false, got true")
|
||||
}
|
||||
if s1 != "arg4" {
|
||||
t.Errorf("Args(): Expected s1 to be 'arg4', got '%s'", s1)
|
||||
}
|
||||
if s2 != "arg5" {
|
||||
t.Errorf("Args(): Expected s2 to be 'arg5', got '%s'", s2)
|
||||
}
|
||||
if s3 != "arg3" {
|
||||
t.Errorf("Args(): Expected s3 to be unchanged ('arg3'), instead got '%s'", s3)
|
||||
}
|
||||
|
||||
// (quick cursor check just for kicks and giggles)
|
||||
if d.cursor != 6 {
|
||||
t.Errorf("Cursor should be 6, but is %d", d.cursor)
|
||||
}
|
||||
|
||||
d.Next() // dir3
|
||||
|
||||
// More arguments than strings
|
||||
if all := d.Args(&s1); !all {
|
||||
t.Error("Args(): Expected true, got false")
|
||||
}
|
||||
if s1 != "arg6" {
|
||||
t.Errorf("Args(): Expected s1 to be 'arg6', got '%s'", s1)
|
||||
}
|
||||
|
||||
d.Next() // dir4
|
||||
|
||||
// No arguments or strings
|
||||
if all := d.Args(); !all {
|
||||
t.Error("Args(): Expected true, got false")
|
||||
}
|
||||
|
||||
// No arguments but at least one string
|
||||
if all := d.Args(&s1); all {
|
||||
t.Error("Args(): Expected false, got true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDispenser_RemainingArgs(t *testing.T) {
|
||||
input := `dir1 arg1 arg2 arg3
|
||||
dir2 arg4 arg5
|
||||
dir3 arg6 { arg7
|
||||
dir4`
|
||||
d := newTestDispenser(input)
|
||||
|
||||
d.Next() // dir1
|
||||
|
||||
args := d.RemainingArgs()
|
||||
if expected := []string{"arg1", "arg2", "arg3"}; !reflect.DeepEqual(args, expected) {
|
||||
t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args)
|
||||
}
|
||||
|
||||
d.Next() // dir2
|
||||
|
||||
args = d.RemainingArgs()
|
||||
if expected := []string{"arg4", "arg5"}; !reflect.DeepEqual(args, expected) {
|
||||
t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args)
|
||||
}
|
||||
|
||||
d.Next() // dir3
|
||||
|
||||
args = d.RemainingArgs()
|
||||
if expected := []string{"arg6"}; !reflect.DeepEqual(args, expected) {
|
||||
t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args)
|
||||
}
|
||||
|
||||
d.Next() // {
|
||||
d.Next() // arg7
|
||||
d.Next() // dir4
|
||||
|
||||
args = d.RemainingArgs()
|
||||
if len(args) != 0 {
|
||||
t.Errorf("RemainingArgs(): Expected %v, got %v", []string{}, args)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDispenser_ArgErr_Err(t *testing.T) {
|
||||
input := `dir1 {
|
||||
}
|
||||
dir2 arg1 arg2`
|
||||
d := newTestDispenser(input)
|
||||
|
||||
d.cursor = 1 // {
|
||||
|
||||
if err := d.ArgErr(); err == nil || !strings.Contains(err.Error(), "{") {
|
||||
t.Errorf("ArgErr(): Expected an error message with { in it, but got '%v'", err)
|
||||
}
|
||||
|
||||
d.cursor = 5 // arg2
|
||||
|
||||
if err := d.ArgErr(); err == nil || !strings.Contains(err.Error(), "arg2") {
|
||||
t.Errorf("ArgErr(): Expected an error message with 'arg2' in it; got '%v'", err)
|
||||
}
|
||||
|
||||
err := d.Err("foobar")
|
||||
if err == nil {
|
||||
t.Fatalf("Err(): Expected an error, got nil")
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "Testfile:3") {
|
||||
t.Errorf("Expected error message with filename:line in it; got '%v'", err)
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "foobar") {
|
||||
t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err)
|
||||
}
|
||||
}
|
||||
|
||||
func newTestDispenser(input string) *Dispenser {
|
||||
tokens, err := allTokens(strings.NewReader(input))
|
||||
if err != nil && err != io.EOF {
|
||||
log.Fatalf("getting all tokens from input: %v", err)
|
||||
}
|
||||
return NewDispenser("Testfile", tokens)
|
||||
}
|
caddyconfig/caddyfile/lexer.go (new executable file, 150 lines)

@@ -0,0 +1,150 @@
// Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyfile

import (
	"bufio"
	"io"
	"unicode"
)

type (
	// lexer is a utility which can get values, token by
	// token, from a Reader. A token is a word, and tokens
	// are separated by whitespace. A word can be enclosed
	// in quotes if it contains whitespace.
	lexer struct {
		reader *bufio.Reader
		token  Token
		line   int
	}

	// Token represents a single parsable unit.
	Token struct {
		File string
		Line int
		Text string
	}
)

// load prepares the lexer to scan an input for tokens.
// It discards any leading byte order mark.
func (l *lexer) load(input io.Reader) error {
	l.reader = bufio.NewReader(input)
	l.line = 1

	// discard byte order mark, if present
	firstCh, _, err := l.reader.ReadRune()
	if err != nil {
		return err
	}
	if firstCh != 0xFEFF {
		err := l.reader.UnreadRune()
		if err != nil {
			return err
		}
	}

	return nil
}

// next loads the next token into the lexer.
// A token is delimited by whitespace, unless
// the token starts with a quotes character (")
// in which case the token goes until the closing
// quotes (the enclosing quotes are not included).
// Inside quoted strings, quotes may be escaped
// with a preceding \ character. No other chars
// may be escaped. The rest of the line is skipped
// if a "#" character is read in. Returns true if
// a token was loaded; false otherwise.
func (l *lexer) next() bool {
	var val []rune
	var comment, quoted, escaped bool

	makeToken := func() bool {
		l.token.Text = string(val)
		return true
	}

	for {
		ch, _, err := l.reader.ReadRune()
		if err != nil {
			if len(val) > 0 {
				return makeToken()
			}
			if err == io.EOF {
				return false
			}
			panic(err)
		}

		if quoted {
			if !escaped {
				if ch == '\\' {
					escaped = true
					continue
				} else if ch == '"' {
					quoted = false
					return makeToken()
				}
			}
			if ch == '\n' {
				l.line++
			}
			if escaped {
				// only escape quotes and newlines
				if ch != '"' && ch != '\n' {
					val = append(val, '\\')
				}
			}
			val = append(val, ch)
			escaped = false
			continue
		}

		if unicode.IsSpace(ch) {
			if ch == '\r' {
				continue
			}
			if ch == '\n' {
				l.line++
				comment = false
			}
			if len(val) > 0 {
				return makeToken()
			}
			continue
		}

		if ch == '#' {
			comment = true
		}

		if comment {
			continue
		}

		if len(val) == 0 {
			l.token = Token{Line: l.line}
			if ch == '"' {
				quoted = true
				continue
			}
		}

		val = append(val, ch)
	}
}
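A package-internal sketch (the lexer type is unexported, so this mirrors how the tests in lexer_test.go drive it) showing what the rules documented on next() produce for a quoted and commented input; the expected tokens in the comment follow directly from those rules. It assumes package caddyfile and an import of "strings".

// Assumes: package caddyfile, plus "strings" imported.
func exampleLexerTokens() []Token {
	l := new(lexer)
	if err := l.load(strings.NewReader("a \"b c\" # trailing comment\nd")); err != nil {
		panic(err)
	}
	var tokens []Token
	for l.next() {
		tokens = append(tokens, l.token)
	}
	// Per the rules above: quotes group "b c" into one token, the comment is
	// skipped, and the newline bumps the line counter:
	//   {Line: 1, Text: "a"}, {Line: 1, Text: "b c"}, {Line: 2, Text: "d"}
	return tokens
}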
caddyconfig/caddyfile/lexer_test.go (new executable file, 196 lines)

@@ -0,0 +1,196 @@
// Copyright 2015 Light Code Labs, LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package caddyfile
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type lexerTestCase struct {
|
||||
input string
|
||||
expected []Token
|
||||
}
|
||||
|
||||
func TestLexer(t *testing.T) {
|
||||
testCases := []lexerTestCase{
|
||||
{
|
||||
input: `host:123`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: "host:123"},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `host:123
|
||||
|
||||
directive`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: "host:123"},
|
||||
{Line: 3, Text: "directive"},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `host:123 {
|
||||
directive
|
||||
}`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: "host:123"},
|
||||
{Line: 1, Text: "{"},
|
||||
{Line: 2, Text: "directive"},
|
||||
{Line: 3, Text: "}"},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `host:123 { directive }`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: "host:123"},
|
||||
{Line: 1, Text: "{"},
|
||||
{Line: 1, Text: "directive"},
|
||||
{Line: 1, Text: "}"},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `host:123 {
|
||||
#comment
|
||||
directive
|
||||
# comment
|
||||
foobar # another comment
|
||||
}`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: "host:123"},
|
||||
{Line: 1, Text: "{"},
|
||||
{Line: 3, Text: "directive"},
|
||||
{Line: 5, Text: "foobar"},
|
||||
{Line: 6, Text: "}"},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `a "quoted value" b
|
||||
foobar`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: "a"},
|
||||
{Line: 1, Text: "quoted value"},
|
||||
{Line: 1, Text: "b"},
|
||||
{Line: 2, Text: "foobar"},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `A "quoted \"value\" inside" B`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: "A"},
|
||||
{Line: 1, Text: `quoted "value" inside`},
|
||||
{Line: 1, Text: "B"},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "A \"newline \\\ninside\" quotes",
|
||||
expected: []Token{
|
||||
{Line: 1, Text: "A"},
|
||||
{Line: 1, Text: "newline \ninside"},
|
||||
{Line: 2, Text: "quotes"},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `"don't\escape"`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: `don't\escape`},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `"don't\\escape"`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: `don't\\escape`},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `A "quoted value with line
|
||||
break inside" {
|
||||
foobar
|
||||
}`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: "A"},
|
||||
{Line: 1, Text: "quoted value with line\n\t\t\t\t\tbreak inside"},
|
||||
{Line: 2, Text: "{"},
|
||||
{Line: 3, Text: "foobar"},
|
||||
{Line: 4, Text: "}"},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `"C:\php\php-cgi.exe"`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: `C:\php\php-cgi.exe`},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: `empty "" string`,
|
||||
expected: []Token{
|
||||
{Line: 1, Text: `empty`},
|
||||
{Line: 1, Text: ``},
|
||||
{Line: 1, Text: `string`},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "skip those\r\nCR characters",
|
||||
expected: []Token{
|
||||
{Line: 1, Text: "skip"},
|
||||
{Line: 1, Text: "those"},
|
||||
{Line: 2, Text: "CR"},
|
||||
{Line: 2, Text: "characters"},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "\xEF\xBB\xBF:8080", // test with leading byte order mark
|
||||
expected: []Token{
|
||||
{Line: 1, Text: ":8080"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
actual := tokenize(testCase.input)
|
||||
lexerCompare(t, i, testCase.expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func tokenize(input string) (tokens []Token) {
|
||||
l := lexer{}
|
||||
if err := l.load(strings.NewReader(input)); err != nil {
|
||||
log.Printf("[ERROR] load failed: %v", err)
|
||||
}
|
||||
for l.next() {
|
||||
tokens = append(tokens, l.token)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func lexerCompare(t *testing.T, n int, expected, actual []Token) {
|
||||
if len(expected) != len(actual) {
|
||||
t.Errorf("Test case %d: expected %d token(s) but got %d", n, len(expected), len(actual))
|
||||
}
|
||||
|
||||
for i := 0; i < len(actual) && i < len(expected); i++ {
|
||||
if actual[i].Line != expected[i].Line {
|
||||
t.Errorf("Test case %d token %d ('%s'): expected line %d but was line %d",
|
||||
n, i, expected[i].Text, expected[i].Line, actual[i].Line)
|
||||
break
|
||||
}
|
||||
if actual[i].Text != expected[i].Text {
|
||||
t.Errorf("Test case %d token %d: expected text '%s' but was '%s'",
|
||||
n, i, expected[i].Text, actual[i].Text)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
caddyconfig/caddyfile/parse.go (new executable file, 492 lines)

@@ -0,0 +1,492 @@
// Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyfile

import (
	"io"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// Parse parses the input just enough to group tokens, in
// order, by server block. No further parsing is performed.
// Server blocks are returned in the order in which they appear.
// Directives that do not appear in validDirectives will cause
// an error. If you do not want to check for valid directives,
// pass in nil instead.
func Parse(filename string, input io.Reader, validDirectives []string) ([]ServerBlock, error) {
	tokens, err := allTokens(input)
	if err != nil {
		return nil, err
	}
	p := parser{Dispenser: NewDispenser(filename, tokens), validDirectives: validDirectives}
	return p.parseAll()
}

// allTokens lexes the entire input, but does not parse it.
// It returns all the tokens from the input, unstructured
// and in order.
func allTokens(input io.Reader) ([]Token, error) {
	l := new(lexer)
	err := l.load(input)
	if err != nil {
		return nil, err
	}
	var tokens []Token
	for l.next() {
		tokens = append(tokens, l.token)
	}
	return tokens, nil
}

type parser struct {
	*Dispenser
	block           ServerBlock // current server block being parsed
	validDirectives []string    // a directive must be valid or it's an error
	eof             bool        // if we encounter a valid EOF in a hard place
	definedSnippets map[string][]Token
}

func (p *parser) parseAll() ([]ServerBlock, error) {
	var blocks []ServerBlock

	for p.Next() {
		err := p.parseOne()
		if err != nil {
			return blocks, err
		}
		if len(p.block.Keys) > 0 {
			blocks = append(blocks, p.block)
		}
	}

	return blocks, nil
}

func (p *parser) parseOne() error {
	p.block = ServerBlock{Tokens: make(map[string][]Token)}

	return p.begin()
}

func (p *parser) begin() error {
	if len(p.tokens) == 0 {
		return nil
	}

	err := p.addresses()

	if err != nil {
		return err
	}

	if p.eof {
		// this happens if the Caddyfile consists of only
		// a line of addresses and nothing else
		return nil
	}

	if ok, name := p.isSnippet(); ok {
		if p.definedSnippets == nil {
			p.definedSnippets = map[string][]Token{}
		}
		if _, found := p.definedSnippets[name]; found {
			return p.Errf("redeclaration of previously declared snippet %s", name)
		}
		// consume all tokens til matched close brace
		tokens, err := p.snippetTokens()
		if err != nil {
			return err
		}
		p.definedSnippets[name] = tokens
		// empty block keys so we don't save this block as a real server.
		p.block.Keys = nil
		return nil
	}

	return p.blockContents()
}

func (p *parser) addresses() error {
	var expectingAnother bool

	for {
		tkn := replaceEnvVars(p.Val())

		// special case: import directive replaces tokens during parse-time
		if tkn == "import" && p.isNewLine() {
			err := p.doImport()
			if err != nil {
				return err
			}
			continue
		}

		// Open brace definitely indicates end of addresses
		if tkn == "{" {
			if expectingAnother {
				return p.Errf("Expected another address but had '%s' - check for extra comma", tkn)
			}
			break
		}

		if tkn != "" { // empty token possible if user typed ""
			// Trailing comma indicates another address will follow, which
			// may possibly be on the next line
			if tkn[len(tkn)-1] == ',' {
				tkn = tkn[:len(tkn)-1]
				expectingAnother = true
			} else {
				expectingAnother = false // but we may still see another one on this line
			}

			p.block.Keys = append(p.block.Keys, tkn)
		}

		// Advance token and possibly break out of loop or return error
		hasNext := p.Next()
		if expectingAnother && !hasNext {
			return p.EOFErr()
		}
		if !hasNext {
			p.eof = true
			break // EOF
		}
		if !expectingAnother && p.isNewLine() {
			break
		}
	}

	return nil
}

func (p *parser) blockContents() error {
	errOpenCurlyBrace := p.openCurlyBrace()
	if errOpenCurlyBrace != nil {
		// single-server configs don't need curly braces
		p.cursor--
	}

	err := p.directives()
	if err != nil {
		return err
	}

	// Only look for close curly brace if there was an opening
	if errOpenCurlyBrace == nil {
		err = p.closeCurlyBrace()
		if err != nil {
			return err
		}
	}

	return nil
}

// directives parses through all the lines for directives
// and it expects the next token to be the first
// directive. It goes until EOF or closing curly brace
// which ends the server block.
func (p *parser) directives() error {
	for p.Next() {
		// end of server block
		if p.Val() == "}" {
			break
		}

		// special case: import directive replaces tokens during parse-time
		if p.Val() == "import" {
			err := p.doImport()
			if err != nil {
				return err
			}
			p.cursor-- // cursor is advanced when we continue, so roll back one more
			continue
		}

		// normal case: parse a directive on this line
		if err := p.directive(); err != nil {
			return err
		}
	}
	return nil
}

// doImport swaps out the import directive and its argument
// (a total of 2 tokens) with the tokens in the specified file
// or globbing pattern. When the function returns, the cursor
// is on the token before where the import directive was. In
// other words, call Next() to access the first token that was
// imported.
func (p *parser) doImport() error {
	// syntax checks
	if !p.NextArg() {
		return p.ArgErr()
	}
	importPattern := replaceEnvVars(p.Val())
	if importPattern == "" {
		return p.Err("Import requires a non-empty filepath")
	}
	if p.NextArg() {
		return p.Err("Import takes only one argument (glob pattern or file)")
	}
	// splice out the import directive and its argument (2 tokens total)
	tokensBefore := p.tokens[:p.cursor-1]
	tokensAfter := p.tokens[p.cursor+1:]
	var importedTokens []Token

	// first check snippets. That is a simple, non-recursive replacement
	if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil {
		importedTokens = p.definedSnippets[importPattern]
	} else {
		// make path relative to the file of the _token_ being processed rather
		// than current working directory (issue #867) and then use glob to get
		// list of matching filenames
		absFile, err := filepath.Abs(p.Dispenser.File())
		if err != nil {
			return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.filename, err)
		}

		var matches []string
		var globPattern string
		if !filepath.IsAbs(importPattern) {
			globPattern = filepath.Join(filepath.Dir(absFile), importPattern)
		} else {
			globPattern = importPattern
		}
		if strings.Count(globPattern, "*") > 1 || strings.Count(globPattern, "?") > 1 ||
			(strings.Contains(globPattern, "[") && strings.Contains(globPattern, "]")) {
			// See issue #2096 - a pattern with many glob expansions can hang for too long
			return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern)
		}
		matches, err = filepath.Glob(globPattern)

		if err != nil {
			return p.Errf("Failed to use import pattern %s: %v", importPattern, err)
		}
		if len(matches) == 0 {
			if strings.ContainsAny(globPattern, "*?[]") {
				log.Printf("[WARNING] No files matching import glob pattern: %s", importPattern)
			} else {
				return p.Errf("File to import not found: %s", importPattern)
			}
		}

		// collect all the imported tokens

		for _, importFile := range matches {
			newTokens, err := p.doSingleImport(importFile)
			if err != nil {
				return err
			}
			importedTokens = append(importedTokens, newTokens...)
		}
	}

	// splice the imported tokens in the place of the import statement
	// and rewind cursor so Next() will land on first imported token
	p.tokens = append(tokensBefore, append(importedTokens, tokensAfter...)...)
	p.cursor--

	return nil
}

// doSingleImport lexes the individual file at importFile and returns
// its tokens or an error, if any.
func (p *parser) doSingleImport(importFile string) ([]Token, error) {
	file, err := os.Open(importFile)
	if err != nil {
		return nil, p.Errf("Could not import %s: %v", importFile, err)
	}
	defer file.Close()

	if info, err := file.Stat(); err != nil {
		return nil, p.Errf("Could not import %s: %v", importFile, err)
	} else if info.IsDir() {
		return nil, p.Errf("Could not import %s: is a directory", importFile)
	}

	importedTokens, err := allTokens(file)
	if err != nil {
		return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err)
	}

	// Tack the file path onto these tokens so errors show the imported file's name
	// (we use full, absolute path to avoid bugs: issue #1892)
	filename, err := filepath.Abs(importFile)
	if err != nil {
		return nil, p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.filename, err)
	}
	for i := 0; i < len(importedTokens); i++ {
		importedTokens[i].File = filename
	}

	return importedTokens, nil
}

// directive collects tokens until the directive's scope
// closes (either end of line or end of curly brace block).
// It expects the currently-loaded token to be a directive
// (or } that ends a server block). The collected tokens
// are loaded into the current server block for later use
// by directive setup functions.
func (p *parser) directive() error {
	dir := replaceEnvVars(p.Val())
	nesting := 0

	if !p.validDirective(dir) {
		return p.Errf("Unknown directive '%s'", dir)
	}

	// The directive itself is appended as a relevant token
	p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor])

	for p.Next() {
		if p.Val() == "{" {
			nesting++
		} else if p.isNewLine() && nesting == 0 {
			p.cursor-- // read too far
			break
		} else if p.Val() == "}" && nesting > 0 {
			nesting--
		} else if p.Val() == "}" && nesting == 0 {
			return p.Err("Unexpected '}' because no matching opening brace")
		} else if p.Val() == "import" && p.isNewLine() {
			if err := p.doImport(); err != nil {
				return err
			}
			p.cursor-- // cursor is advanced when we continue, so roll back one more
			continue
		}
		p.tokens[p.cursor].Text = replaceEnvVars(p.tokens[p.cursor].Text)
		p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor])
	}

	if nesting > 0 {
		return p.EOFErr()
	}
	return nil
}

// openCurlyBrace expects the current token to be an
// opening curly brace. This acts like an assertion
// because it returns an error if the token is not
// an opening curly brace. It does NOT advance the token.
func (p *parser) openCurlyBrace() error {
	if p.Val() != "{" {
		return p.SyntaxErr("{")
	}
	return nil
}

// closeCurlyBrace expects the current token to be
// a closing curly brace. This acts like an assertion
// because it returns an error if the token is not
// a closing curly brace. It does NOT advance the token.
func (p *parser) closeCurlyBrace() error {
	if p.Val() != "}" {
		return p.SyntaxErr("}")
	}
	return nil
}

// validDirective returns true if dir is in p.validDirectives.
func (p *parser) validDirective(dir string) bool {
	if p.validDirectives == nil {
		return true
	}
	for _, d := range p.validDirectives {
		if d == dir {
			return true
		}
	}
	return false
}

// replaceEnvVars replaces environment variables that appear in the token
// and understands both the $UNIX and %WINDOWS% syntaxes.
func replaceEnvVars(s string) string {
	s = replaceEnvReferences(s, "{%", "%}")
	s = replaceEnvReferences(s, "{$", "}")
	return s
}

// replaceEnvReferences performs the actual replacement of env variables
// in s, given the placeholder start and placeholder end strings.
func replaceEnvReferences(s, refStart, refEnd string) string {
	index := strings.Index(s, refStart)
	for index != -1 {
		endIndex := strings.Index(s[index:], refEnd)
		if endIndex == -1 {
			break
		}

		endIndex += index
		if endIndex > index+len(refStart) {
			ref := s[index : endIndex+len(refEnd)]
			s = strings.Replace(s, ref, os.Getenv(ref[len(refStart):len(ref)-len(refEnd)]), -1)
		} else {
			return s
		}
		index = strings.Index(s, refStart)
	}
	return s
}

// ServerBlock associates any number of keys (usually addresses
// of some sort) with tokens (grouped by directive name).
type ServerBlock struct {
	Keys   []string
	Tokens map[string][]Token
}

func (p *parser) isSnippet() (bool, string) {
	keys := p.block.Keys
	// A snippet block is a single key with parens. Nothing else qualifies.
	if len(keys) == 1 && strings.HasPrefix(keys[0], "(") && strings.HasSuffix(keys[0], ")") {
		return true, strings.TrimSuffix(keys[0][1:], ")")
	}
	return false, ""
}

// read and store everything in a block for later replay.
func (p *parser) snippetTokens() ([]Token, error) {
	// snippet must have curlies.
	err := p.openCurlyBrace()
	if err != nil {
		return nil, err
	}
	count := 1
	tokens := []Token{}
	for p.Next() {
		if p.Val() == "}" {
			count--
			if count == 0 {
				break
			}
		}
		if p.Val() == "{" {
			count++
		}
		tokens = append(tokens, p.tokens[p.cursor])
	}
	// make sure we're matched up
	if count != 0 {
		return nil, p.SyntaxErr("}")
	}
	return tokens, nil
}
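Tying parse.go together: a sketch of the exported entry point with a snippet definition and an import of it. log_output and gizmo are made-up directive names; passing nil for validDirectives skips the directive check, as documented on Parse above.

package main // illustration only

import (
	"fmt"
	"strings"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func main() {
	input := `(logging) {
	log_output stderr
}

example.com {
	import logging
	gizmo foo
}`

	// nil validDirectives: accept any directive name.
	blocks, err := caddyfile.Parse("Caddyfile", strings.NewReader(input), nil)
	if err != nil {
		panic(err)
	}
	// The snippet block has no keys, so only example.com comes back, with the
	// imported log_output tokens spliced into its token map.
	for _, b := range blocks {
		fmt.Println(b.Keys, len(b.Tokens))
	}
}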
caddyconfig/caddyfile/parse_test.go (new executable file, 718 lines)

@@ -0,0 +1,718 @@
// Copyright 2015 Light Code Labs, LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package caddyfile
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAllTokens(t *testing.T) {
|
||||
input := strings.NewReader("a b c\nd e")
|
||||
expected := []string{"a", "b", "c", "d", "e"}
|
||||
tokens, err := allTokens(input)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
if len(tokens) != len(expected) {
|
||||
t.Fatalf("Expected %d tokens, got %d", len(expected), len(tokens))
|
||||
}
|
||||
|
||||
for i, val := range expected {
|
||||
if tokens[i].Text != val {
|
||||
t.Errorf("Token %d should be '%s' but was '%s'", i, val, tokens[i].Text)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseOneAndImport(t *testing.T) {
|
||||
testParseOne := func(input string) (ServerBlock, error) {
|
||||
p := testParser(input)
|
||||
p.Next() // parseOne doesn't call Next() to start, so we must
|
||||
err := p.parseOne()
|
||||
return p.block, err
|
||||
}
|
||||
|
||||
for i, test := range []struct {
|
||||
input string
|
||||
shouldErr bool
|
||||
keys []string
|
||||
tokens map[string]int // map of directive name to number of tokens expected
|
||||
}{
|
||||
{`localhost`, false, []string{
|
||||
"localhost",
|
||||
}, map[string]int{}},
|
||||
|
||||
{`localhost
|
||||
dir1`, false, []string{
|
||||
"localhost",
|
||||
}, map[string]int{
|
||||
"dir1": 1,
|
||||
}},
|
||||
|
||||
{`localhost:1234
|
||||
dir1 foo bar`, false, []string{
|
||||
"localhost:1234",
|
||||
}, map[string]int{
|
||||
"dir1": 3,
|
||||
}},
|
||||
|
||||
{`localhost {
|
||||
dir1
|
||||
}`, false, []string{
|
||||
"localhost",
|
||||
}, map[string]int{
|
||||
"dir1": 1,
|
||||
}},
|
||||
|
||||
{`localhost:1234 {
|
||||
dir1 foo bar
|
||||
dir2
|
||||
}`, false, []string{
|
||||
"localhost:1234",
|
||||
}, map[string]int{
|
||||
"dir1": 3,
|
||||
"dir2": 1,
|
||||
}},
|
||||
|
||||
{`http://localhost https://localhost
|
||||
dir1 foo bar`, false, []string{
|
||||
"http://localhost",
|
||||
"https://localhost",
|
||||
}, map[string]int{
|
||||
"dir1": 3,
|
||||
}},
|
||||
|
||||
{`http://localhost https://localhost {
|
||||
dir1 foo bar
|
||||
}`, false, []string{
|
||||
"http://localhost",
|
||||
"https://localhost",
|
||||
}, map[string]int{
|
||||
"dir1": 3,
|
||||
}},
|
||||
|
||||
{`http://localhost, https://localhost {
|
||||
dir1 foo bar
|
||||
}`, false, []string{
|
||||
"http://localhost",
|
||||
"https://localhost",
|
||||
}, map[string]int{
|
||||
"dir1": 3,
|
||||
}},
|
||||
|
||||
{`http://localhost, {
|
||||
}`, true, []string{
|
||||
"http://localhost",
|
||||
}, map[string]int{}},
|
||||
|
||||
{`host1:80, http://host2.com
|
||||
dir1 foo bar
|
||||
dir2 baz`, false, []string{
|
||||
"host1:80",
|
||||
"http://host2.com",
|
||||
}, map[string]int{
|
||||
"dir1": 3,
|
||||
"dir2": 2,
|
||||
}},
|
||||
|
||||
{`http://host1.com,
|
||||
http://host2.com,
|
||||
https://host3.com`, false, []string{
|
||||
"http://host1.com",
|
||||
"http://host2.com",
|
||||
"https://host3.com",
|
||||
}, map[string]int{}},
|
||||
|
||||
{`http://host1.com:1234, https://host2.com
|
||||
dir1 foo {
|
||||
bar baz
|
||||
}
|
||||
dir2`, false, []string{
|
||||
"http://host1.com:1234",
|
||||
"https://host2.com",
|
||||
}, map[string]int{
|
||||
"dir1": 6,
|
||||
"dir2": 1,
|
||||
}},
|
||||
|
||||
{`127.0.0.1
|
||||
dir1 {
|
||||
bar baz
|
||||
}
|
||||
dir2 {
|
||||
foo bar
|
||||
}`, false, []string{
|
||||
"127.0.0.1",
|
||||
}, map[string]int{
|
||||
"dir1": 5,
|
||||
"dir2": 5,
|
||||
}},
|
||||
|
||||
{`localhost
|
||||
dir1 {
|
||||
foo`, true, []string{
|
||||
"localhost",
|
||||
}, map[string]int{
|
||||
"dir1": 3,
|
||||
}},
|
||||
|
||||
{`localhost
|
||||
dir1 {
|
||||
}`, false, []string{
|
||||
"localhost",
|
||||
}, map[string]int{
|
||||
"dir1": 3,
|
||||
}},
|
||||
|
||||
{`localhost
|
||||
dir1 {
|
||||
} }`, true, []string{
|
||||
"localhost",
|
||||
}, map[string]int{
|
||||
"dir1": 3,
|
||||
}},
|
||||
|
||||
{`localhost
|
||||
dir1 {
|
||||
nested {
|
||||
foo
|
||||
}
|
||||
}
|
||||
dir2 foo bar`, false, []string{
|
||||
"localhost",
|
||||
}, map[string]int{
|
||||
"dir1": 7,
|
||||
"dir2": 3,
|
||||
}},
|
||||
|
||||
{``, false, []string{}, map[string]int{}},
|
||||
|
||||
{`localhost
|
||||
dir1 arg1
|
||||
import testdata/import_test1.txt`, false, []string{
|
||||
"localhost",
|
||||
}, map[string]int{
|
||||
"dir1": 2,
|
||||
"dir2": 3,
|
||||
"dir3": 1,
|
||||
}},
|
||||
|
||||
{`import testdata/import_test2.txt`, false, []string{
|
||||
"host1",
|
||||
}, map[string]int{
|
||||
"dir1": 1,
|
||||
"dir2": 2,
|
||||
}},
|
||||
|
||||
{`import testdata/import_test1.txt testdata/import_test2.txt`, true, []string{}, map[string]int{}},
|
||||
|
||||
{`import testdata/not_found.txt`, true, []string{}, map[string]int{}},
|
||||
|
||||
{`""`, false, []string{}, map[string]int{}},
|
||||
|
||||
{``, false, []string{}, map[string]int{}},
|
||||
|
||||
// test cases found by fuzzing!
|
||||
{`import }{$"`, true, []string{}, map[string]int{}},
|
||||
{`import /*/*.txt`, true, []string{}, map[string]int{}},
|
||||
{`import /???/?*?o`, true, []string{}, map[string]int{}},
|
||||
{`import /??`, true, []string{}, map[string]int{}},
|
||||
{`import /[a-z]`, true, []string{}, map[string]int{}},
|
||||
{`import {$}`, true, []string{}, map[string]int{}},
|
||||
{`import {%}`, true, []string{}, map[string]int{}},
|
||||
{`import {$$}`, true, []string{}, map[string]int{}},
|
||||
{`import {%%}`, true, []string{}, map[string]int{}},
|
||||
} {
|
||||
result, err := testParseOne(test.input)
|
||||
|
||||
if test.shouldErr && err == nil {
|
||||
t.Errorf("Test %d: Expected an error, but didn't get one", i)
|
||||
}
|
||||
if !test.shouldErr && err != nil {
|
||||
t.Errorf("Test %d: Expected no error, but got: %v", i, err)
|
||||
}
|
||||
|
||||
if len(result.Keys) != len(test.keys) {
|
||||
t.Errorf("Test %d: Expected %d keys, got %d",
|
||||
i, len(test.keys), len(result.Keys))
|
||||
continue
|
||||
}
|
||||
for j, addr := range result.Keys {
|
||||
if addr != test.keys[j] {
|
||||
t.Errorf("Test %d, key %d: Expected '%s', but was '%s'",
|
||||
i, j, test.keys[j], addr)
|
||||
}
|
||||
}
|
||||
|
||||
if len(result.Tokens) != len(test.tokens) {
|
||||
t.Errorf("Test %d: Expected %d directives, had %d",
|
||||
i, len(test.tokens), len(result.Tokens))
|
||||
continue
|
||||
}
|
||||
for directive, tokens := range result.Tokens {
|
||||
if len(tokens) != test.tokens[directive] {
|
||||
t.Errorf("Test %d, directive '%s': Expected %d tokens, counted %d",
|
||||
i, directive, test.tokens[directive], len(tokens))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRecursiveImport(t *testing.T) {
|
||||
testParseOne := func(input string) (ServerBlock, error) {
|
||||
p := testParser(input)
|
||||
p.Next() // parseOne doesn't call Next() to start, so we must
|
||||
err := p.parseOne()
|
||||
return p.block, err
|
||||
}
|
||||
|
||||
isExpected := func(got ServerBlock) bool {
|
||||
if len(got.Keys) != 1 || got.Keys[0] != "localhost" {
|
||||
t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys)
|
||||
return false
|
||||
}
|
||||
if len(got.Tokens) != 2 {
|
||||
t.Errorf("got wrong number of tokens: expect 2, got %d", len(got.Tokens))
|
||||
return false
|
||||
}
|
||||
if len(got.Tokens["dir1"]) != 1 || len(got.Tokens["dir2"]) != 2 {
|
||||
t.Errorf("got unexpect tokens: %v", got.Tokens)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
recursiveFile1, err := filepath.Abs("testdata/recursive_import_test1")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
recursiveFile2, err := filepath.Abs("testdata/recursive_import_test2")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// test relative recursive import
|
||||
err = ioutil.WriteFile(recursiveFile1, []byte(
|
||||
`localhost
|
||||
dir1
|
||||
import recursive_import_test2`), 0644)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.Remove(recursiveFile1)
|
||||
|
||||
err = ioutil.WriteFile(recursiveFile2, []byte("dir2 1"), 0644)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.Remove(recursiveFile2)
|
||||
|
||||
// import absolute path
|
||||
result, err := testParseOne("import " + recursiveFile1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !isExpected(result) {
|
||||
t.Error("absolute+relative import failed")
|
||||
}
|
||||
|
||||
// import relative path
|
||||
result, err = testParseOne("import testdata/recursive_import_test1")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !isExpected(result) {
|
||||
t.Error("relative+relative import failed")
|
||||
}
|
||||
|
||||
// test absolute recursive import
|
||||
err = ioutil.WriteFile(recursiveFile1, []byte(
|
||||
`localhost
|
||||
dir1
|
||||
import `+recursiveFile2), 0644)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// import absolute path
|
||||
result, err = testParseOne("import " + recursiveFile1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !isExpected(result) {
|
||||
t.Error("absolute+absolute import failed")
|
||||
}
|
||||
|
||||
// import relative path
|
||||
result, err = testParseOne("import testdata/recursive_import_test1")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !isExpected(result) {
|
||||
t.Error("relative+absolute import failed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirectiveImport(t *testing.T) {
|
||||
testParseOne := func(input string) (ServerBlock, error) {
|
||||
p := testParser(input)
|
||||
p.Next() // parseOne doesn't call Next() to start, so we must
|
||||
err := p.parseOne()
|
||||
return p.block, err
|
||||
}
|
||||
|
||||
isExpected := func(got ServerBlock) bool {
|
||||
if len(got.Keys) != 1 || got.Keys[0] != "localhost" {
|
||||
t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys)
|
||||
return false
|
||||
}
|
||||
if len(got.Tokens) != 2 {
|
||||
t.Errorf("got wrong number of tokens: expect 2, got %d", len(got.Tokens))
|
||||
return false
|
||||
}
|
||||
if len(got.Tokens["dir1"]) != 1 || len(got.Tokens["proxy"]) != 8 {
|
||||
t.Errorf("got unexpect tokens: %v", got.Tokens)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
directiveFile, err := filepath.Abs("testdata/directive_import_test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(directiveFile, []byte(`prop1 1
|
||||
prop2 2`), 0644)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.Remove(directiveFile)
|
||||
|
||||
// import from existing file
|
||||
result, err := testParseOne(`localhost
|
||||
dir1
|
||||
proxy {
|
||||
import testdata/directive_import_test
|
||||
transparent
|
||||
}`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !isExpected(result) {
|
||||
t.Error("directive import failed")
|
||||
}
|
||||
|
||||
// import from nonexistent file
|
||||
_, err = testParseOne(`localhost
|
||||
dir1
|
||||
proxy {
|
||||
import testdata/nonexistent_file
|
||||
transparent
|
||||
}`)
|
||||
if err == nil {
|
||||
t.Fatal("expected error when importing a nonexistent file")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAll(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
input string
|
||||
shouldErr bool
|
||||
keys [][]string // keys per server block, in order
|
||||
}{
|
||||
{`localhost`, false, [][]string{
|
||||
{"localhost"},
|
||||
}},
|
||||
|
||||
{`localhost:1234`, false, [][]string{
|
||||
{"localhost:1234"},
|
||||
}},
|
||||
|
||||
{`localhost:1234 {
|
||||
}
|
||||
localhost:2015 {
|
||||
}`, false, [][]string{
|
||||
{"localhost:1234"},
|
||||
{"localhost:2015"},
|
||||
}},
|
||||
|
||||
{`localhost:1234, http://host2`, false, [][]string{
|
||||
{"localhost:1234", "http://host2"},
|
||||
}},
|
||||
|
||||
{`localhost:1234, http://host2,`, true, [][]string{}},
|
||||
|
||||
{`http://host1.com, http://host2.com {
|
||||
}
|
||||
https://host3.com, https://host4.com {
|
||||
}`, false, [][]string{
|
||||
{"http://host1.com", "http://host2.com"},
|
||||
{"https://host3.com", "https://host4.com"},
|
||||
}},
|
||||
|
||||
{`import testdata/import_glob*.txt`, false, [][]string{
|
||||
{"glob0.host0"},
|
||||
{"glob0.host1"},
|
||||
{"glob1.host0"},
|
||||
{"glob2.host0"},
|
||||
}},
|
||||
|
||||
{`import notfound/*`, false, [][]string{}}, // glob needn't error with no matches
|
||||
{`import notfound/file.conf`, true, [][]string{}}, // but a specific file should
|
||||
} {
|
||||
p := testParser(test.input)
|
||||
blocks, err := p.parseAll()
|
||||
|
||||
if test.shouldErr && err == nil {
|
||||
t.Errorf("Test %d: Expected an error, but didn't get one", i)
|
||||
}
|
||||
if !test.shouldErr && err != nil {
|
||||
t.Errorf("Test %d: Expected no error, but got: %v", i, err)
|
||||
}
|
||||
|
||||
if len(blocks) != len(test.keys) {
|
||||
t.Errorf("Test %d: Expected %d server blocks, got %d",
|
||||
i, len(test.keys), len(blocks))
|
||||
continue
|
||||
}
|
||||
for j, block := range blocks {
|
||||
if len(block.Keys) != len(test.keys[j]) {
|
||||
t.Errorf("Test %d: Expected %d keys in block %d, got %d",
|
||||
i, len(test.keys[j]), j, len(block.Keys))
|
||||
continue
|
||||
}
|
||||
for k, addr := range block.Keys {
|
||||
if addr != test.keys[j][k] {
|
||||
t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'",
|
||||
i, j, k, test.keys[j][k], addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnvironmentReplacement(t *testing.T) {
|
||||
os.Setenv("PORT", "8080")
|
||||
os.Setenv("ADDRESS", "servername.com")
|
||||
os.Setenv("FOOBAR", "foobar")
|
||||
os.Setenv("PARTIAL_DIR", "r1")
|
||||
|
||||
// basic test; unix-style env vars
|
||||
p := testParser(`{$ADDRESS}`)
|
||||
blocks, _ := p.parseAll()
|
||||
if actual, expected := blocks[0].Keys[0], "servername.com"; expected != actual {
|
||||
t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
// basic test; unix-style env vars
|
||||
p = testParser(`di{$PARTIAL_DIR}`)
|
||||
blocks, _ = p.parseAll()
|
||||
if actual, expected := blocks[0].Keys[0], "dir1"; expected != actual {
|
||||
t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
// multiple vars per token
|
||||
p = testParser(`{$ADDRESS}:{$PORT}`)
|
||||
blocks, _ = p.parseAll()
|
||||
if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual {
|
||||
t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
// windows-style var and unix style in same token
|
||||
p = testParser(`{%ADDRESS%}:{$PORT}`)
|
||||
blocks, _ = p.parseAll()
|
||||
if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual {
|
||||
t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
// reverse order
|
||||
p = testParser(`{$ADDRESS}:{%PORT%}`)
|
||||
blocks, _ = p.parseAll()
|
||||
if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual {
|
||||
t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
// env var in server block body as argument
|
||||
p = testParser(":{%PORT%}\ndir1 {$FOOBAR}")
|
||||
blocks, _ = p.parseAll()
|
||||
if actual, expected := blocks[0].Keys[0], ":8080"; expected != actual {
|
||||
t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
if actual, expected := blocks[0].Tokens["dir1"][1].Text, "foobar"; expected != actual {
|
||||
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
// combined windows env vars in argument
|
||||
p = testParser(":{%PORT%}\ndir1 {%ADDRESS%}/{%FOOBAR%}")
|
||||
blocks, _ = p.parseAll()
|
||||
if actual, expected := blocks[0].Tokens["dir1"][1].Text, "servername.com/foobar"; expected != actual {
|
||||
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
// malformed env var (windows)
|
||||
p = testParser(":1234\ndir1 {%ADDRESS}")
|
||||
blocks, _ = p.parseAll()
|
||||
if actual, expected := blocks[0].Tokens["dir1"][1].Text, "{%ADDRESS}"; expected != actual {
|
||||
t.Errorf("Expected host to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
// malformed (non-existent) env var (unix)
|
||||
p = testParser(`:{$PORT$}`)
|
||||
blocks, _ = p.parseAll()
|
||||
if actual, expected := blocks[0].Keys[0], ":"; expected != actual {
|
||||
t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
// in quoted field
|
||||
p = testParser(":1234\ndir1 \"Test {$FOOBAR} test\"")
|
||||
blocks, _ = p.parseAll()
|
||||
if actual, expected := blocks[0].Tokens["dir1"][1].Text, "Test foobar test"; expected != actual {
|
||||
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
// after end token
|
||||
p = testParser(":1234\nanswer \"{{ .Name }} {$FOOBAR}\"")
|
||||
blocks, _ = p.parseAll()
|
||||
if actual, expected := blocks[0].Tokens["answer"][1].Text, "{{ .Name }} foobar"; expected != actual {
|
||||
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func testParser(input string) parser {
|
||||
return parser{Dispenser: newTestDispenser(input)}
|
||||
}
|
||||
|
||||
func TestSnippets(t *testing.T) {
|
||||
p := testParser(`
|
||||
(common) {
|
||||
gzip foo
|
||||
errors stderr
|
||||
}
|
||||
http://example.com {
|
||||
import common
|
||||
}
|
||||
`)
|
||||
blocks, err := p.parseAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, b := range blocks {
|
||||
t.Log(b.Keys)
|
||||
t.Log(b.Tokens)
|
||||
}
|
||||
if len(blocks) != 1 {
|
||||
t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
|
||||
}
|
||||
if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual {
|
||||
t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
if len(blocks[0].Tokens) != 2 {
|
||||
t.Fatalf("Server block should have tokens from import")
|
||||
}
|
||||
if actual, expected := blocks[0].Tokens["gzip"][0].Text, "gzip"; expected != actual {
|
||||
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
if actual, expected := blocks[0].Tokens["errors"][1].Text, "stderr"; expected != actual {
|
||||
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func writeStringToTempFileOrDie(t *testing.T, str string) (pathToFile string) {
|
||||
file, err := ioutil.TempFile("", t.Name())
|
||||
if err != nil {
|
||||
panic(err) // get a stack trace so we know where this was called from.
|
||||
}
|
||||
if _, err := file.WriteString(str); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := file.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return file.Name()
|
||||
}
|
||||
|
||||
func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) {
|
||||
fileName := writeStringToTempFileOrDie(t, `
|
||||
http://example.com {
|
||||
# This isn't an import directive, it's just an arg with value 'import'
|
||||
basicauth / import password
|
||||
}
|
||||
`)
|
||||
// Parse the root file that imports the other one.
|
||||
p := testParser(`import ` + fileName)
|
||||
blocks, err := p.parseAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, b := range blocks {
|
||||
t.Log(b.Keys)
|
||||
t.Log(b.Tokens)
|
||||
}
|
||||
auth := blocks[0].Tokens["basicauth"]
|
||||
line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text
|
||||
if line != "basicauth / import password" {
|
||||
// Previously, it would be changed to:
|
||||
// basicauth / import /path/to/test/dir/password
|
||||
// referencing a file that (probably) doesn't exist and changing the
|
||||
// password!
|
||||
t.Errorf("Expected basicauth tokens to be 'basicauth / import password' but got %#q", line)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSnippetAcrossMultipleFiles(t *testing.T) {
|
||||
// Make the derived Caddyfile that expects (common) to be defined.
|
||||
fileName := writeStringToTempFileOrDie(t, `
|
||||
http://example.com {
|
||||
import common
|
||||
}
|
||||
`)
|
||||
|
||||
// Parse the root file that defines (common) and then imports the other one.
|
||||
p := testParser(`
|
||||
(common) {
|
||||
gzip foo
|
||||
}
|
||||
import ` + fileName + `
|
||||
`)
|
||||
|
||||
blocks, err := p.parseAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, b := range blocks {
|
||||
t.Log(b.Keys)
|
||||
t.Log(b.Tokens)
|
||||
}
|
||||
if len(blocks) != 1 {
|
||||
t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
|
||||
}
|
||||
if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual {
|
||||
t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
if len(blocks[0].Tokens) != 1 {
|
||||
t.Fatalf("Server block should have tokens from import")
|
||||
}
|
||||
if actual, expected := blocks[0].Tokens["gzip"][0].Text, "gzip"; expected != actual {
|
||||
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
|
||||
}
|
||||
}
|
6
caddyconfig/caddyfile/testdata/import_glob0.txt
vendored
Executable file
|
@ -0,0 +1,6 @@
|
|||
glob0.host0 {
|
||||
dir2 arg1
|
||||
}
|
||||
|
||||
glob0.host1 {
|
||||
}
|
4
caddyconfig/caddyfile/testdata/import_glob1.txt
vendored
Executable file
|
@ -0,0 +1,4 @@
|
|||
glob1.host0 {
|
||||
dir1
|
||||
dir2 arg1
|
||||
}
|
3
caddyconfig/caddyfile/testdata/import_glob2.txt
vendored
Executable file
|
@ -0,0 +1,3 @@
|
|||
glob2.host0 {
|
||||
dir2 arg1
|
||||
}
|
2
caddyconfig/caddyfile/testdata/import_test1.txt
vendored
Executable file
|
@ -0,0 +1,2 @@
|
|||
dir2 arg1 arg2
|
||||
dir3
|
4
caddyconfig/caddyfile/testdata/import_test2.txt
vendored
Executable file
|
@ -0,0 +1,4 @@
|
|||
host1 {
|
||||
dir1
|
||||
dir2 arg1
|
||||
}
|
113
caddyconfig/configadapters.go
Normal file
|
@ -0,0 +1,113 @@
|
|||
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package caddyconfig
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Adapter is a type which can adapt a configuration to Caddy JSON.
|
||||
// It returns the results and any warnings, or an error.
|
||||
type Adapter interface {
|
||||
Adapt(body []byte, options map[string]string) ([]byte, []Warning, error)
|
||||
}
|
||||
|
||||
// Warning represents a warning or notice related to conversion.
|
||||
type Warning struct {
|
||||
File string
|
||||
Line int
|
||||
Directive string
|
||||
Message string
|
||||
}
|
||||
|
||||
// JSON encodes val as JSON, returning it as a json.RawMessage. Any
|
||||
// marshaling errors (which are highly unlikely with correct code)
|
||||
// are converted to warnings. This is convenient when filling config
|
||||
// structs that require a json.RawMessage, without having to worry
|
||||
// about errors.
|
||||
func JSON(val interface{}, warnings *[]Warning) json.RawMessage {
|
||||
b, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
if warnings != nil {
|
||||
*warnings = append(*warnings, Warning{Message: err.Error()})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// JSONModuleObject is like JSON, except it marshals val into a JSON object
|
||||
// and then adds a key to that object named fieldName with the value fieldVal.
|
||||
// This is useful for JSON-encoding module values where the module name has to
|
||||
// be described within the object by a certain key; for example,
|
||||
// "responder": "file_server" for a file server HTTP responder. The val must
|
||||
// encode into a map[string]interface{} (i.e. it must be a struct or map),
|
||||
// and any errors are converted into warnings, so this can be conveniently
|
||||
// used when filling a struct. For correct code, there should be no errors.
|
||||
func JSONModuleObject(val interface{}, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage {
|
||||
// encode to a JSON object first
|
||||
enc, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
if warnings != nil {
|
||||
*warnings = append(*warnings, Warning{Message: err.Error()})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// then decode the object
|
||||
var tmp map[string]interface{}
|
||||
err = json.Unmarshal(enc, &tmp)
|
||||
if err != nil {
|
||||
if warnings != nil {
|
||||
*warnings = append(*warnings, Warning{Message: err.Error()})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// so we can easily add the module's field with its appointed value
|
||||
tmp[fieldName] = fieldVal
|
||||
|
||||
// then re-marshal as JSON
|
||||
result, err := json.Marshal(tmp)
|
||||
if err != nil {
|
||||
if warnings != nil {
|
||||
*warnings = append(*warnings, Warning{Message: err.Error()})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
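For orientation, here is a minimal, hypothetical usage sketch of JSONModuleObject (the staticResp struct below is made up for illustration; any value that marshals to a JSON object would do):

package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig"
)

// staticResp is a stand-in value for this example only.
type staticResp struct {
	Body string `json:"body,omitempty"`
}

func main() {
	var warns []caddyconfig.Warning
	// The module name is injected as the "handler" key, producing an object
	// like {"body":"hello","handler":"static_response"}.
	raw := caddyconfig.JSONModuleObject(staticResp{Body: "hello"}, "handler", "static_response", &warns)
	fmt.Println(string(raw), len(warns))
}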
// JSONIndent is used to JSON-marshal the final resulting Caddy
|
||||
// configuration in a consistent, human-readable way.
|
||||
func JSONIndent(val interface{}) ([]byte, error) {
|
||||
return json.MarshalIndent(val, "", "\t")
|
||||
}
|
||||
|
||||
func RegisterAdapter(name string, adapter Adapter) error {
|
||||
if _, ok := configAdapters[name]; ok {
|
||||
return fmt.Errorf("%s: already registered", name)
|
||||
}
|
||||
configAdapters[name] = adapter
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetAdapter(name string) Adapter {
|
||||
return configAdapters[name]
|
||||
}
|
||||
|
||||
var configAdapters = make(map[string]Adapter)
|
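As a hedged sketch of how this registry is meant to be used (the adapter name "mytype" and the dummyAdapter type are hypothetical): a plugin registers its adapter in an init function, and callers look it up by name and invoke Adapt.

package myadapter

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig"
)

// dummyAdapter is a hypothetical Adapter implementation for illustration only.
type dummyAdapter struct{}

// Adapt satisfies caddyconfig.Adapter; a real adapter would convert body
// into Caddy JSON here.
func (dummyAdapter) Adapt(body []byte, options map[string]string) ([]byte, []caddyconfig.Warning, error) {
	return []byte(`{}`), nil, nil
}

func init() {
	// register under a unique name; RegisterAdapter errors on duplicates
	if err := caddyconfig.RegisterAdapter("mytype", dummyAdapter{}); err != nil {
		panic(err)
	}
}

// adaptExample looks the adapter up by name and runs it.
func adaptExample(input []byte) ([]byte, error) {
	adapter := caddyconfig.GetAdapter("mytype")
	if adapter == nil {
		return nil, fmt.Errorf("no adapter named mytype")
	}
	out, _, err := adapter.Adapt(input, nil)
	return out, err
}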
350
caddyconfig/httpcaddyfile/addresses.go
Normal file
|
@ -0,0 +1,350 @@
|
|||
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package httpcaddyfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
|
||||
"github.com/mholt/certmagic"
|
||||
)
|
||||
|
||||
// mapAddressToServerBlocks returns a map of listener address to list of server
|
||||
// blocks that will be served on that address. To do this, each server block is
|
||||
// expanded so that each one is considered individually, although keys of a
|
||||
// server block that share the same address stay grouped together so the config
|
||||
// isn't repeated unnecessarily. For example, this Caddyfile:
|
||||
//
|
||||
// example.com {
|
||||
// bind 127.0.0.1
|
||||
// }
|
||||
// www.example.com, example.net/path, localhost:9999 {
|
||||
// bind 127.0.0.1 1.2.3.4
|
||||
// }
|
||||
//
|
||||
// has two server blocks to start with. But expressed in this Caddyfile are
|
||||
// actually 4 listener addresses: 127.0.0.1:443, 1.2.3.4:443, 127.0.0.1:9999,
|
||||
// and 1.2.3.4:9999. This is because the bind directive is applied to each
|
||||
// key of its server block (specifying the host part), and each key may have
|
||||
// a different port. And we definitely need to be sure that a site which is
|
||||
// bound to be served on a specific interface is not served on others just
|
||||
// because that is more convenient: it would be a potential security risk
|
||||
// if the difference between interfaces means private vs. public.
|
||||
//
|
||||
// So what this function does for the example above is iterate each server
|
||||
// block, and for each server block, iterate its keys. For the first, it
|
||||
// finds one key (example.com) and determines its listener address
|
||||
// (127.0.0.1:443 - because of 'bind' and automatic HTTPS). It then adds
|
||||
// the listener address to the map value returned by this function, with
|
||||
// the first server block as one of its associations.
|
||||
//
|
||||
// It then iterates each key on the second server block and associates them
|
||||
// with one or more listener addresses. Indeed, each key in this block has
|
||||
// two listener addresses because of the 'bind' directive. Once we know
|
||||
// which addresses serve which keys, we can create a new server block for
|
||||
// each address containing the contents of the server block and only those
|
||||
// specific keys of the server block which use that address.
|
||||
//
|
||||
// It is possible and even likely that some keys in the returned map have
|
||||
// the exact same list of server blocks (i.e. they are identical). This
|
||||
// happens when multiple hosts are declared with a 'bind' directive and
|
||||
// the resulting listener addresses are not shared by any other server
|
||||
// block (or the other server blocks are exactly identical in their token
|
||||
// contents). This happens with our example above because 1.2.3.4:443
|
||||
// and 1.2.3.4:9999 are used exclusively with the second server block. This
|
||||
// repetition may be undesirable, so call consolidateAddrMappings() to map
|
||||
// multiple addresses to the same lists of server blocks (a many:many mapping).
|
||||
// (Doing this is essentially a map-reduce technique.)
|
||||
func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []caddyfile.ServerBlock) (map[string][]caddyfile.ServerBlock, error) {
|
||||
sbmap := make(map[string][]caddyfile.ServerBlock)
|
||||
|
||||
for i, sblock := range originalServerBlocks {
|
||||
// within a server block, we need to map all the listener addresses
|
||||
// implied by the server block to the keys of the server block which
|
||||
// will be served by them; this has the effect of treating each
|
||||
// key of a server block as its own, but without having to repeat its
|
||||
// contents in cases where multiple keys really can be served together
|
||||
addrToKeys := make(map[string][]string)
|
||||
for j, key := range sblock.Keys {
|
||||
// a key can have multiple listener addresses if there are multiple
|
||||
// arguments to the 'bind' directive (although they will all have
|
||||
// the same port, since the port is defined by the key or is implicit
|
||||
// through automatic HTTPS)
|
||||
addrs, err := st.listenerAddrsForServerBlockKey(sblock, key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key, err)
|
||||
}
|
||||
|
||||
// associate this key with each listener address it is served on
|
||||
for _, addr := range addrs {
|
||||
addrToKeys[addr] = append(addrToKeys[addr], key)
|
||||
}
|
||||
}
|
||||
|
||||
// now that we know which addresses serve which keys of this
|
||||
// server block, we iterate that mapping and create a list of
|
||||
// new server blocks for each address where the keys of the
|
||||
// server block are only the ones which use the address; but
|
||||
// the contents (tokens) are of course the same
|
||||
for addr, keys := range addrToKeys {
|
||||
sbmap[addr] = append(sbmap[addr], caddyfile.ServerBlock{
|
||||
Keys: keys,
|
||||
Tokens: sblock.Tokens,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return sbmap, nil
|
||||
}
|
||||
|
||||
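To make the mapping concrete, this is roughly the association the example Caddyfile in the comment above implies, simplified to plain strings (the real map values are []caddyfile.ServerBlock, and the 443 ports assume automatic HTTPS):

package main

import "fmt"

func main() {
	// listener address -> keys of each server block served on it
	addrToBlockKeys := map[string][][]string{
		"127.0.0.1:443":  {{"example.com"}, {"www.example.com", "example.net/path"}},
		"1.2.3.4:443":    {{"www.example.com", "example.net/path"}},
		"127.0.0.1:9999": {{"localhost:9999"}},
		"1.2.3.4:9999":   {{"localhost:9999"}},
	}
	fmt.Println(len(addrToBlockKeys)) // 4 distinct listener addresses
}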
// consolidateAddrMappings eliminates repetition of identical server blocks in a mapping of
|
||||
// single listener addresses to lists of server blocks. Since multiple addresses may serve
|
||||
// identical sites (server block contents), this function turns a 1:many mapping into a
|
||||
// many:many mapping. Server block contents (tokens) must be exactly identical so that
|
||||
// reflect.DeepEqual returns true in order for the addresses to be combined. Identical
|
||||
// entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each
|
||||
// association from multiple addresses to multiple server blocks; i.e. each element of
|
||||
// the returned slice) becomes a server definition in the output JSON.
|
||||
func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]caddyfile.ServerBlock) []sbAddrAssociation {
|
||||
var sbaddrs []sbAddrAssociation
|
||||
for addr, sblocks := range addrToServerBlocks {
|
||||
// we start with knowing that at least this address
|
||||
// maps to these server blocks
|
||||
a := sbAddrAssociation{
|
||||
addresses: []string{addr},
|
||||
serverBlocks: sblocks,
|
||||
}
|
||||
|
||||
// now find other addresses that map to identical
|
||||
// server blocks and add them to our list of
|
||||
// addresses, while removing them from the map
|
||||
for otherAddr, otherSblocks := range addrToServerBlocks {
|
||||
if addr == otherAddr {
|
||||
continue
|
||||
}
|
||||
if reflect.DeepEqual(sblocks, otherSblocks) {
|
||||
a.addresses = append(a.addresses, otherAddr)
|
||||
delete(addrToServerBlocks, otherAddr)
|
||||
}
|
||||
}
|
||||
|
||||
sbaddrs = append(sbaddrs, a)
|
||||
}
|
||||
return sbaddrs
|
||||
}
|
||||
|
||||
func (st *ServerType) listenerAddrsForServerBlockKey(sblock caddyfile.ServerBlock, key string) ([]string, error) {
|
||||
addr, err := standardizeAddress(key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing key: %v", err)
|
||||
}
|
||||
|
||||
lnPort := defaultPort
|
||||
if addr.Port != "" {
|
||||
// port explicitly defined
|
||||
lnPort = addr.Port
|
||||
} else if certmagic.HostQualifies(addr.Host) {
|
||||
// automatic HTTPS
|
||||
lnPort = strconv.Itoa(certmagic.HTTPSPort)
|
||||
}
|
||||
|
||||
// the bind directive specifies hosts, but is optional
|
||||
var lnHosts []string
|
||||
for i, token := range sblock.Tokens["bind"] {
|
||||
if i == 0 {
|
||||
continue
|
||||
}
|
||||
lnHosts = append(lnHosts, token.Text)
|
||||
}
|
||||
if len(lnHosts) == 0 {
|
||||
lnHosts = []string{""}
|
||||
}
|
||||
|
||||
// use a map to prevent duplication
|
||||
listeners := make(map[string]struct{})
|
||||
for _, host := range lnHosts {
|
||||
listeners[net.JoinHostPort(host, lnPort)] = struct{}{}
|
||||
}
|
||||
|
||||
// now turn map into list
|
||||
var listenersList []string
|
||||
for lnStr := range listeners {
|
||||
listenersList = append(listenersList, lnStr)
|
||||
}
|
||||
// sort.Strings(listenersList) // TODO: is sorting necessary?
|
||||
|
||||
return listenersList, nil
|
||||
}
|
||||
|
||||
// Address represents a site address. It contains
|
||||
// the original input value, and the component
|
||||
// parts of an address. The component parts may be
|
||||
// updated to the correct values as setup proceeds,
|
||||
// but the original value should never be changed.
|
||||
//
|
||||
// The Host field must be in a normalized form.
|
||||
type Address struct {
|
||||
Original, Scheme, Host, Port, Path string
|
||||
}
|
||||
|
||||
// String returns a human-friendly print of the address.
|
||||
func (a Address) String() string {
|
||||
if a.Host == "" && a.Port == "" {
|
||||
return ""
|
||||
}
|
||||
scheme := a.Scheme
|
||||
if scheme == "" {
|
||||
if a.Port == strconv.Itoa(certmagic.HTTPSPort) {
|
||||
scheme = "https"
|
||||
} else {
|
||||
scheme = "http"
|
||||
}
|
||||
}
|
||||
s := scheme
|
||||
if s != "" {
|
||||
s += "://"
|
||||
}
|
||||
if a.Port != "" &&
|
||||
((scheme == "https" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort)) ||
|
||||
(scheme == "http" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort))) {
|
||||
s += net.JoinHostPort(a.Host, a.Port)
|
||||
} else {
|
||||
s += a.Host
|
||||
}
|
||||
if a.Path != "" {
|
||||
s += a.Path
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// VHost returns a sensible concatenation of Host:Port/Path from a.
|
||||
// It's basically a.Original without the scheme.
|
||||
func (a Address) VHost() string {
|
||||
if idx := strings.Index(a.Original, "://"); idx > -1 {
|
||||
return a.Original[idx+3:]
|
||||
}
|
||||
return a.Original
|
||||
}
|
||||
|
||||
// Normalize returns a copy of the address with the scheme and host lowercased (and the path too, when paths are treated as case-insensitive)
|
||||
func (a Address) Normalize() Address {
|
||||
path := a.Path
|
||||
if !caseSensitivePath {
|
||||
path = strings.ToLower(path)
|
||||
}
|
||||
|
||||
// ensure host is normalized if it's an IP address
|
||||
host := a.Host
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
host = ip.String()
|
||||
}
|
||||
|
||||
return Address{
|
||||
Original: a.Original,
|
||||
Scheme: strings.ToLower(a.Scheme),
|
||||
Host: strings.ToLower(host),
|
||||
Port: a.Port,
|
||||
Path: path,
|
||||
}
|
||||
}
|
||||
|
||||
// Key is similar to String, just replaces scheme and host values with modified values.
|
||||
// Unlike String it doesn't add anything default (scheme, port, etc)
|
||||
func (a Address) Key() string {
|
||||
res := ""
|
||||
if a.Scheme != "" {
|
||||
res += a.Scheme + "://"
|
||||
}
|
||||
if a.Host != "" {
|
||||
res += a.Host
|
||||
}
|
||||
if a.Port != "" {
|
||||
if strings.HasPrefix(a.Original[len(res):], ":"+a.Port) {
|
||||
// insert port only if the original has its own explicit port
|
||||
res += ":" + a.Port
|
||||
}
|
||||
}
|
||||
if a.Path != "" {
|
||||
res += a.Path
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// standardizeAddress parses an address string into a structured format with separate
|
||||
// scheme, host, port, and path portions, as well as the original input string.
|
||||
func standardizeAddress(str string) (Address, error) {
|
||||
httpPort, httpsPort := strconv.Itoa(certmagic.HTTPPort), strconv.Itoa(certmagic.HTTPSPort)
|
||||
|
||||
input := str
|
||||
|
||||
// Split input into components (prepend with // to assert host by default)
|
||||
if !strings.Contains(str, "//") && !strings.HasPrefix(str, "/") {
|
||||
str = "//" + str
|
||||
}
|
||||
u, err := url.Parse(str)
|
||||
if err != nil {
|
||||
return Address{}, err
|
||||
}
|
||||
|
||||
// separate host and port
|
||||
host, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
host, port, err = net.SplitHostPort(u.Host + ":")
|
||||
if err != nil {
|
||||
host = u.Host
|
||||
}
|
||||
}
|
||||
|
||||
// see if we can set port based off scheme
|
||||
if port == "" {
|
||||
if u.Scheme == "http" {
|
||||
port = httpPort
|
||||
} else if u.Scheme == "https" {
|
||||
port = httpsPort
|
||||
}
|
||||
}
|
||||
|
||||
// repeated or conflicting scheme is confusing, so error
|
||||
if u.Scheme != "" && (port == "http" || port == "https") {
|
||||
return Address{}, fmt.Errorf("[%s] scheme specified twice in address", input)
|
||||
}
|
||||
|
||||
// error if scheme and port combination violate convention
|
||||
if (u.Scheme == "http" && port == httpsPort) || (u.Scheme == "https" && port == httpPort) {
|
||||
return Address{}, fmt.Errorf("[%s] scheme and port violate convention", input)
|
||||
}
|
||||
|
||||
// standardize http and https ports to their respective port numbers
|
||||
if port == "http" {
|
||||
u.Scheme = "http"
|
||||
port = httpPort
|
||||
} else if port == "https" {
|
||||
u.Scheme = "https"
|
||||
port = httpsPort
|
||||
}
|
||||
|
||||
return Address{Original: input, Scheme: u.Scheme, Host: host, Port: port, Path: u.Path}, err
|
||||
}
|
||||
|
||||
const (
|
||||
defaultPort = "2015"
|
||||
caseSensitivePath = false
|
||||
)
|
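A brief, hedged illustration of the Address helpers above (it assumes the default HTTPS port constant is 443; expected outputs are shown as comments):

package httpcaddyfile

import "fmt"

// exampleAddressUsage is illustrative only and not part of this change.
func exampleAddressUsage() {
	addr, err := standardizeAddress("https://Example.COM:443/Foo")
	if err != nil {
		return
	}
	fmt.Println(addr.VHost())              // "Example.COM:443/Foo" (original, minus scheme)
	fmt.Println(addr.Normalize().String()) // should print "https://example.com/foo" (host and path lowercased, default port elided)
}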
129
caddyconfig/httpcaddyfile/addresses_test.go
Normal file
|
@ -0,0 +1,129 @@
|
|||
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package httpcaddyfile
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestStandardizeAddress(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
input string
|
||||
scheme, host, port, path string
|
||||
shouldErr bool
|
||||
}{
|
||||
{`localhost`, "", "localhost", "", "", false},
|
||||
{`localhost:1234`, "", "localhost", "1234", "", false},
|
||||
{`localhost:`, "", "localhost", "", "", false},
|
||||
{`0.0.0.0`, "", "0.0.0.0", "", "", false},
|
||||
{`127.0.0.1:1234`, "", "127.0.0.1", "1234", "", false},
|
||||
{`:1234`, "", "", "1234", "", false},
|
||||
{`[::1]`, "", "::1", "", "", false},
|
||||
{`[::1]:1234`, "", "::1", "1234", "", false},
|
||||
{`:`, "", "", "", "", false},
|
||||
{`localhost:http`, "http", "localhost", "80", "", false},
|
||||
{`localhost:https`, "https", "localhost", "443", "", false},
|
||||
{`:http`, "http", "", "80", "", false},
|
||||
{`:https`, "https", "", "443", "", false},
|
||||
{`http://localhost:https`, "", "", "", "", true}, // conflict
|
||||
{`http://localhost:http`, "", "", "", "", true}, // repeated scheme
|
||||
{`http://localhost:443`, "", "", "", "", true}, // not conventional
|
||||
{`https://localhost:80`, "", "", "", "", true}, // not conventional
|
||||
{`http://localhost`, "http", "localhost", "80", "", false},
|
||||
{`https://localhost`, "https", "localhost", "443", "", false},
|
||||
{`http://127.0.0.1`, "http", "127.0.0.1", "80", "", false},
|
||||
{`https://127.0.0.1`, "https", "127.0.0.1", "443", "", false},
|
||||
{`http://[::1]`, "http", "::1", "80", "", false},
|
||||
{`http://localhost:1234`, "http", "localhost", "1234", "", false},
|
||||
{`https://127.0.0.1:1234`, "https", "127.0.0.1", "1234", "", false},
|
||||
{`http://[::1]:1234`, "http", "::1", "1234", "", false},
|
||||
{``, "", "", "", "", false},
|
||||
{`::1`, "", "::1", "", "", true},
|
||||
{`localhost::`, "", "localhost::", "", "", true},
|
||||
{`#$%@`, "", "", "", "", true},
|
||||
{`host/path`, "", "host", "", "/path", false},
|
||||
{`http://host/`, "http", "host", "80", "/", false},
|
||||
{`//asdf`, "", "asdf", "", "", false},
|
||||
{`:1234/asdf`, "", "", "1234", "/asdf", false},
|
||||
{`http://host/path`, "http", "host", "80", "/path", false},
|
||||
{`https://host:443/path/foo`, "https", "host", "443", "/path/foo", false},
|
||||
{`host:80/path`, "", "host", "80", "/path", false},
|
||||
{`host:https/path`, "https", "host", "443", "/path", false},
|
||||
{`/path`, "", "", "", "/path", false},
|
||||
} {
|
||||
actual, err := standardizeAddress(test.input)
|
||||
|
||||
if err != nil && !test.shouldErr {
|
||||
t.Errorf("Test %d (%s): Expected no error, but had error: %v", i, test.input, err)
|
||||
}
|
||||
if err == nil && test.shouldErr {
|
||||
t.Errorf("Test %d (%s): Expected error, but had none", i, test.input)
|
||||
}
|
||||
|
||||
if !test.shouldErr && actual.Original != test.input {
|
||||
t.Errorf("Test %d (%s): Expected original '%s', got '%s'", i, test.input, test.input, actual.Original)
|
||||
}
|
||||
if actual.Scheme != test.scheme {
|
||||
t.Errorf("Test %d (%s): Expected scheme '%s', got '%s'", i, test.input, test.scheme, actual.Scheme)
|
||||
}
|
||||
if actual.Host != test.host {
|
||||
t.Errorf("Test %d (%s): Expected host '%s', got '%s'", i, test.input, test.host, actual.Host)
|
||||
}
|
||||
if actual.Port != test.port {
|
||||
t.Errorf("Test %d (%s): Expected port '%s', got '%s'", i, test.input, test.port, actual.Port)
|
||||
}
|
||||
if actual.Path != test.path {
|
||||
t.Errorf("Test %d (%s): Expected path '%s', got '%s'", i, test.input, test.path, actual.Path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddressVHost(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
addr Address
|
||||
expected string
|
||||
}{
|
||||
{Address{Original: "host:1234"}, "host:1234"},
|
||||
{Address{Original: "host:1234/foo"}, "host:1234/foo"},
|
||||
{Address{Original: "host/foo"}, "host/foo"},
|
||||
{Address{Original: "http://host/foo"}, "host/foo"},
|
||||
{Address{Original: "https://host/foo"}, "host/foo"},
|
||||
} {
|
||||
actual := test.addr.VHost()
|
||||
if actual != test.expected {
|
||||
t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddressString(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
addr Address
|
||||
expected string
|
||||
}{
|
||||
{Address{Scheme: "http", Host: "host", Port: "1234", Path: "/path"}, "http://host:1234/path"},
|
||||
{Address{Scheme: "", Host: "host", Port: "", Path: ""}, "http://host"},
|
||||
{Address{Scheme: "", Host: "host", Port: "80", Path: ""}, "http://host"},
|
||||
{Address{Scheme: "", Host: "host", Port: "443", Path: ""}, "https://host"},
|
||||
{Address{Scheme: "https", Host: "host", Port: "443", Path: ""}, "https://host"},
|
||||
{Address{Scheme: "https", Host: "host", Port: "", Path: ""}, "https://host"},
|
||||
{Address{Scheme: "", Host: "host", Port: "80", Path: "/path"}, "http://host/path"},
|
||||
{Address{Scheme: "http", Host: "", Port: "1234", Path: ""}, "http://:1234"},
|
||||
{Address{Scheme: "", Host: "", Port: "", Path: ""}, ""},
|
||||
} {
|
||||
actual := test.addr.String()
|
||||
if actual != test.expected {
|
||||
t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
257
caddyconfig/httpcaddyfile/builtins.go
Normal file
|
@ -0,0 +1,257 @@
|
|||
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package httpcaddyfile
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html"
|
||||
"net/http"
|
||||
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig"
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddytls"
|
||||
)
|
||||
|
||||
func (st *ServerType) parseRoot(
|
||||
tkns []caddyfile.Token,
|
||||
matcherDefs map[string]map[string]json.RawMessage,
|
||||
warnings *[]caddyconfig.Warning,
|
||||
) ([]caddyhttp.Route, error) {
|
||||
var routes []caddyhttp.Route
|
||||
|
||||
matchersAndTokens, err := st.tokensToMatcherSets(tkns, matcherDefs, warnings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, mst := range matchersAndTokens {
|
||||
d := caddyfile.NewDispenser("Caddyfile", mst.tokens)
|
||||
|
||||
var root string
|
||||
for d.Next() {
|
||||
if !d.NextArg() {
|
||||
return nil, d.ArgErr()
|
||||
}
|
||||
root = d.Val()
|
||||
if d.NextArg() {
|
||||
return nil, d.ArgErr()
|
||||
}
|
||||
}
|
||||
|
||||
varsHandler := caddyhttp.VarsMiddleware{"root": root}
|
||||
route := caddyhttp.Route{
|
||||
Handle: []json.RawMessage{
|
||||
caddyconfig.JSONModuleObject(varsHandler, "handler", "vars", warnings),
|
||||
},
|
||||
}
|
||||
if mst.matcherSet != nil {
|
||||
route.MatcherSets = []map[string]json.RawMessage{mst.matcherSet}
|
||||
}
|
||||
|
||||
routes = append(routes, route)
|
||||
}
|
||||
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
func (st *ServerType) parseRedir(
|
||||
tkns []caddyfile.Token,
|
||||
matcherDefs map[string]map[string]json.RawMessage,
|
||||
warnings *[]caddyconfig.Warning,
|
||||
) ([]caddyhttp.Route, error) {
|
||||
var routes []caddyhttp.Route
|
||||
|
||||
matchersAndTokens, err := st.tokensToMatcherSets(tkns, matcherDefs, warnings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, mst := range matchersAndTokens {
|
||||
var route caddyhttp.Route
|
||||
|
||||
d := caddyfile.NewDispenser("Caddyfile", mst.tokens)
|
||||
|
||||
for d.Next() {
|
||||
if !d.NextArg() {
|
||||
return nil, d.ArgErr()
|
||||
}
|
||||
to := d.Val()
|
||||
|
||||
var code string
|
||||
if d.NextArg() {
|
||||
code = d.Val()
|
||||
}
|
||||
if code == "permanent" {
|
||||
code = "301"
|
||||
}
|
||||
if code == "temporary" || code == "" {
|
||||
code = "307"
|
||||
}
|
||||
var body string
|
||||
if code == "meta" {
|
||||
// Script tag comes first since that will better imitate a redirect in the browser's
|
||||
// history, but the meta tag is a fallback for most non-JS clients.
|
||||
const metaRedir = `<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Redirecting...</title>
|
||||
<script>window.location.replace("%s");</script>
|
||||
<meta http-equiv="refresh" content="0; URL='%s'">
|
||||
</head>
|
||||
<body>Redirecting to <a href="%s">%s</a>...</body>
|
||||
</html>
|
||||
`
|
||||
safeTo := html.EscapeString(to)
|
||||
body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
|
||||
}
|
||||
|
||||
handler := caddyhttp.StaticResponse{
|
||||
StatusCode: caddyhttp.WeakString(code),
|
||||
Headers: http.Header{"Location": []string{to}},
|
||||
Body: body,
|
||||
}
|
||||
|
||||
route.Handle = append(route.Handle,
|
||||
caddyconfig.JSONModuleObject(handler, "handler", "static_response", warnings))
|
||||
}
|
||||
|
||||
if mst.matcherSet != nil {
|
||||
route.MatcherSets = []map[string]json.RawMessage{mst.matcherSet}
|
||||
}
|
||||
|
||||
routes = append(routes, route)
|
||||
}
|
||||
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
func (st *ServerType) parseTLSAutomationManager(d *caddyfile.Dispenser) (caddytls.ACMEManagerMaker, error) {
|
||||
var m caddytls.ACMEManagerMaker
|
||||
|
||||
for d.Next() {
|
||||
firstLine := d.RemainingArgs()
|
||||
if len(firstLine) == 1 && firstLine[0] != "off" {
|
||||
m.Email = firstLine[0]
|
||||
}
|
||||
|
||||
var hasBlock bool
|
||||
for d.NextBlock() {
|
||||
hasBlock = true
|
||||
switch d.Val() {
|
||||
case "ca":
|
||||
arg := d.RemainingArgs()
|
||||
if len(arg) != 1 {
|
||||
return m, d.ArgErr()
|
||||
}
|
||||
m.CA = arg[0]
|
||||
// TODO: other properties
|
||||
}
|
||||
}
|
||||
|
||||
// a naked tls directive is not allowed
|
||||
if len(firstLine) == 0 && !hasBlock {
|
||||
return m, d.ArgErr()
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (st *ServerType) parseTLSCerts(d *caddyfile.Dispenser) (map[string]caddytls.CertificateLoader, error) {
|
||||
var fileLoader caddytls.FileLoader
|
||||
var folderLoader caddytls.FolderLoader
|
||||
|
||||
for d.Next() {
|
||||
// file loader
|
||||
firstLine := d.RemainingArgs()
|
||||
if len(firstLine) == 2 {
|
||||
fileLoader = append(fileLoader, caddytls.CertKeyFilePair{
|
||||
Certificate: firstLine[0],
|
||||
Key: firstLine[1],
|
||||
// TODO: tags, for enterprise module's certificate selection
|
||||
})
|
||||
}
|
||||
|
||||
// folder loader
|
||||
for d.NextBlock() {
|
||||
if d.Val() == "load" {
|
||||
folderLoader = append(folderLoader, d.RemainingArgs()...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// put configured loaders into the map
|
||||
loaders := make(map[string]caddytls.CertificateLoader)
|
||||
if len(fileLoader) > 0 {
|
||||
loaders["load_files"] = fileLoader
|
||||
}
|
||||
if len(folderLoader) > 0 {
|
||||
loaders["load_folders"] = folderLoader
|
||||
}
|
||||
|
||||
return loaders, nil
|
||||
}
|
||||
|
||||
func (st *ServerType) parseTLSConnPolicy(d *caddyfile.Dispenser) (*caddytls.ConnectionPolicy, error) {
|
||||
cp := new(caddytls.ConnectionPolicy)
|
||||
|
||||
for d.Next() {
|
||||
for d.NextBlock() {
|
||||
switch d.Val() {
|
||||
case "protocols":
|
||||
args := d.RemainingArgs()
|
||||
if len(args) == 0 {
|
||||
return nil, d.SyntaxErr("one or two protocols")
|
||||
}
|
||||
if len(args) > 0 {
|
||||
if _, ok := caddytls.SupportedProtocols[args[0]]; !ok {
|
||||
return nil, d.Errf("Wrong protocol name or protocol not supported: '%s'", args[0])
|
||||
}
|
||||
cp.ProtocolMin = args[0]
|
||||
}
|
||||
if len(args) > 1 {
|
||||
if _, ok := caddytls.SupportedProtocols[args[1]]; !ok {
|
||||
return nil, d.Errf("Wrong protocol name or protocol not supported: '%s'", args[1])
|
||||
}
|
||||
cp.ProtocolMax = args[1]
|
||||
}
|
||||
case "ciphers":
|
||||
for d.NextArg() {
|
||||
if _, ok := caddytls.SupportedCipherSuites[d.Val()]; !ok {
|
||||
return nil, d.Errf("Wrong cipher suite name or cipher suite not supported: '%s'", d.Val())
|
||||
}
|
||||
cp.CipherSuites = append(cp.CipherSuites, d.Val())
|
||||
}
|
||||
case "curves":
|
||||
for d.NextArg() {
|
||||
if _, ok := caddytls.SupportedCurves[d.Val()]; !ok {
|
||||
return nil, d.Errf("Wrong curve name or curve not supported: '%s'", d.Val())
|
||||
}
|
||||
cp.Curves = append(cp.Curves, d.Val())
|
||||
}
|
||||
case "alpn":
|
||||
args := d.RemainingArgs()
|
||||
if len(args) == 0 {
|
||||
return nil, d.ArgErr()
|
||||
}
|
||||
cp.ALPN = args
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cp, nil
|
||||
}
|
92
caddyconfig/httpcaddyfile/handlers.go
Normal file
|
@ -0,0 +1,92 @@
|
|||
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package httpcaddyfile
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig"
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
|
||||
)
|
||||
|
||||
func (st *ServerType) parseMatcherDefinitions(d *caddyfile.Dispenser) (map[string]map[string]json.RawMessage, error) {
|
||||
matchers := make(map[string]map[string]json.RawMessage)
|
||||
for d.Next() {
|
||||
definitionName := d.Val()
|
||||
for d.NextBlock() {
|
||||
matcherName := d.Val()
|
||||
mod, err := caddy.GetModule("http.matchers." + matcherName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
|
||||
}
|
||||
unm, ok := mod.New().(caddyfile.Unmarshaler)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
|
||||
}
|
||||
err = unm.UnmarshalCaddyfile(d.NewFromNextTokens())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rm, ok := unm.(caddyhttp.RequestMatcher)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
|
||||
}
|
||||
if _, ok := matchers[definitionName]; !ok {
|
||||
matchers[definitionName] = make(map[string]json.RawMessage)
|
||||
}
|
||||
matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
|
||||
}
|
||||
}
|
||||
return matchers, nil
|
||||
}
|
||||
|
||||
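For context, a matcher module usable from the Caddyfile would implement the caddyfile.Unmarshaler side roughly as below. This is a hypothetical sketch: the Unmarshaler method signature is assumed to take a *caddyfile.Dispenser, and a real matcher would also implement caddyhttp.RequestMatcher as required above.

package mymatcher

import (
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

// HostList is a hypothetical request matcher used only for illustration.
type HostList []string

// UnmarshalCaddyfile reads the matcher's arguments from the dispenser,
// e.g. the tokens following its name inside a matcher definition.
func (h *HostList) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	for d.Next() {
		*h = append(*h, d.RemainingArgs()...)
	}
	return nil
}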
// directiveBuckets returns a list of middleware/handler directives.
|
||||
// Buckets are ordered, and directives should be evaluated in their
|
||||
// bucket order. Within a bucket, directives are not ordered. Hence,
|
||||
// the return value has a slice of buckets, where each bucket is a
|
||||
// map, which is a strongly-typed reminder that directives within a
|
||||
// bucket are not ordered.
|
||||
func directiveBuckets() []map[string]struct{} {
|
||||
directiveBuckets := []map[string]struct{}{
|
||||
// prefer odd-numbered buckets; evens are there for contingencies
|
||||
{}, // 0
|
||||
{}, // 1 - keep empty unless necessary
|
||||
{}, // 2
|
||||
{}, // 3 - first handlers, last responders
|
||||
{}, // 4
|
||||
{}, // 5 - middle of chain
|
||||
{}, // 6
|
||||
{}, // 7 - last handlers, first responders
|
||||
{}, // 8
|
||||
{}, // 9 - keep empty unless necessary
|
||||
{}, // 10
|
||||
}
|
||||
for _, mod := range caddy.GetModules("http.handlers") {
|
||||
if hd, ok := mod.New().(HandlerDirective); ok {
|
||||
bucket := hd.Bucket()
|
||||
if bucket < 0 || bucket >= len(directiveBuckets) {
|
||||
log.Printf("[ERROR] directive %s: bucket out of range [0-%d): %d; skipping",
|
||||
mod.Name, len(directiveBuckets), bucket)
|
||||
continue
|
||||
}
|
||||
directiveBuckets[bucket][mod.ID()] = struct{}{}
|
||||
}
|
||||
}
|
||||
return directiveBuckets
|
||||
}
|
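The HandlerDirective interface is not shown in this diff; assuming it requires a Bucket() int method, a handler module would opt into a bucket along these lines (hypothetical names throughout):

package mymodule

// HandlerDirective is assumed to look like this; the real definition lives
// elsewhere in this commit and may carry additional methods.
type HandlerDirective interface {
	Bucket() int
}

// Gizmo is a hypothetical HTTP handler module used only for illustration.
type Gizmo struct{}

// Bucket places the directive in bucket 5, the middle of the chain per the
// directiveBuckets comments above; out-of-range values are skipped with an error log.
func (Gizmo) Bucket() int { return 5 }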
542
caddyconfig/httpcaddyfile/httptype.go
Normal file
|
@ -0,0 +1,542 @@
|
|||
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package httpcaddyfile
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/mholt/certmagic"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig"
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddytls"
|
||||
)
|
||||
|
||||
func init() {
|
||||
caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}})
|
||||
}
|
||||
|
||||
// ServerType can set up a config from an HTTP Caddyfile.
|
||||
type ServerType struct {
|
||||
}
|
||||
|
||||
// ValidDirectives returns the list of known directives.
|
||||
func (ServerType) ValidDirectives() []string {
|
||||
dirs := []string{"matcher", "root", "tls", "redir"} // TODO: put special-case (hard-coded, or non-handler) directives here
|
||||
for _, mod := range caddy.GetModules("http.handlers") {
|
||||
if _, ok := mod.New().(HandlerDirective); ok {
|
||||
dirs = append(dirs, mod.ID())
|
||||
}
|
||||
}
|
||||
return dirs
|
||||
}
|
||||
|
||||
// Setup makes a config from the tokens.
|
||||
func (st ServerType) Setup(originalServerBlocks []caddyfile.ServerBlock,
|
||||
options map[string]string) (*caddy.Config, []caddyconfig.Warning, error) {
|
||||
var warnings []caddyconfig.Warning
|
||||
|
||||
// map
|
||||
sbmap, err := st.mapAddressToServerBlocks(originalServerBlocks)
|
||||
if err != nil {
|
||||
return nil, warnings, err
|
||||
}
|
||||
|
||||
// reduce
|
||||
pairings := st.consolidateAddrMappings(sbmap)
|
||||
|
||||
// each pairing of listener addresses to list of server
|
||||
// blocks is basically a server definition
|
||||
servers, err := st.serversFromPairings(pairings, &warnings)
|
||||
if err != nil {
|
||||
return nil, warnings, err
|
||||
}
|
||||
|
||||
// now that each server is configured, make the HTTP app
|
||||
httpApp := caddyhttp.App{
|
||||
HTTPPort: tryInt(options["http-port"], &warnings),
|
||||
HTTPSPort: tryInt(options["https-port"], &warnings),
|
||||
Servers: servers,
|
||||
}
|
||||
|
||||
// now for the TLS app! (TODO: refactor into own func)
|
||||
tlsApp := caddytls.TLS{Certificates: make(map[string]json.RawMessage)}
|
||||
for _, p := range pairings {
|
||||
for _, sblock := range p.serverBlocks {
|
||||
if tkns, ok := sblock.Tokens["tls"]; ok {
|
||||
// extract all unique hostnames from the server block
|
||||
// keys, then convert to a slice for use in the TLS app
|
||||
hostMap := make(map[string]struct{})
|
||||
for _, sblockKey := range sblock.Keys {
|
||||
addr, err := standardizeAddress(sblockKey)
|
||||
if err != nil {
|
||||
return nil, warnings, fmt.Errorf("parsing server block key: %v", err)
|
||||
}
|
||||
hostMap[addr.Host] = struct{}{}
|
||||
}
|
||||
sblockHosts := make([]string, 0, len(hostMap))
|
||||
for host := range hostMap {
|
||||
sblockHosts = append(sblockHosts, host)
|
||||
}
|
||||
|
||||
// parse tokens to get ACME manager config
|
||||
acmeMgr, err := st.parseTLSAutomationManager(caddyfile.NewDispenser("Caddyfile", tkns))
|
||||
if err != nil {
|
||||
return nil, warnings, err
|
||||
}
|
||||
|
||||
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, caddytls.AutomationPolicy{
|
||||
Hosts: sblockHosts,
|
||||
ManagementRaw: caddyconfig.JSONModuleObject(acmeMgr, "module", "acme", &warnings),
|
||||
})
|
||||
|
||||
// parse tokens to get certificates to be loaded manually
|
||||
certLoaders, err := st.parseTLSCerts(caddyfile.NewDispenser("Caddyfile", tkns))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for loaderName, loader := range certLoaders {
|
||||
tlsApp.Certificates[loaderName] = caddyconfig.JSON(loader, &warnings)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// annnd the top-level config, then we're done!
|
||||
cfg := &caddy.Config{AppsRaw: make(map[string]json.RawMessage)}
|
||||
if !reflect.DeepEqual(httpApp, caddyhttp.App{}) {
|
||||
cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings)
|
||||
}
|
||||
if !reflect.DeepEqual(tlsApp, caddytls.TLS{}) {
|
||||
cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings)
|
||||
}
|
||||
|
||||
return cfg, warnings, nil
|
||||
}
|
||||
|
||||
// hostsFromServerBlockKeys returns a list of all the
|
||||
// hostnames found in the keys of the server block sb.
|
||||
// The list may not be in a consistent order.
|
||||
func (st *ServerType) hostsFromServerBlockKeys(sb caddyfile.ServerBlock) ([]string, error) {
|
||||
// first get each unique hostname
|
||||
hostMap := make(map[string]struct{})
|
||||
for _, sblockKey := range sb.Keys {
|
||||
addr, err := standardizeAddress(sblockKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing server block key: %v", err)
|
||||
}
|
||||
hostMap[addr.Host] = struct{}{}
|
||||
}
|
||||
|
||||
// convert map to slice
|
||||
sblockHosts := make([]string, 0, len(hostMap))
|
||||
for host := range hostMap {
|
||||
sblockHosts = append(sblockHosts, host)
|
||||
}
|
||||
|
||||
return sblockHosts, nil
|
||||
}
|
||||
|
||||
// serversFromPairings creates the servers for each pairing of addresses
|
||||
// to server blocks. Each pairing is essentially a server definition.
|
||||
func (st *ServerType) serversFromPairings(pairings []sbAddrAssociation, warnings *[]caddyconfig.Warning) (map[string]*caddyhttp.Server, error) {
|
||||
servers := make(map[string]*caddyhttp.Server)
|
||||
|
||||
for i, p := range pairings {
|
||||
srv := &caddyhttp.Server{
|
||||
Listen: p.addresses,
|
||||
}
|
||||
|
||||
for _, sblock := range p.serverBlocks {
|
||||
matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.Keys, err)
|
||||
}
|
||||
|
||||
// extract matcher definitions
|
||||
d := caddyfile.NewDispenser("Caddyfile", sblock.Tokens["matcher"])
|
||||
matcherDefs, err := st.parseMatcherDefinitions(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
siteVarSubroute, handlerSubroute := new(caddyhttp.Subroute), new(caddyhttp.Subroute)
|
||||
|
||||
// built-in directives
|
||||
|
||||
// root: path to root of site
|
||||
if tkns, ok := sblock.Tokens["root"]; ok {
|
||||
routes, err := st.parseRoot(tkns, matcherDefs, warnings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
siteVarSubroute.Routes = append(siteVarSubroute.Routes, routes...)
|
||||
}
|
||||
|
||||
// tls: off and conn policies
|
||||
if tkns, ok := sblock.Tokens["tls"]; ok {
|
||||
// get the hosts for this server block...
|
||||
hosts, err := st.hostsFromServerBlockKeys(sblock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// ...and of those, which ones qualify for auto HTTPS
|
||||
var autoHTTPSQualifiedHosts []string
|
||||
for _, h := range hosts {
|
||||
if certmagic.HostQualifies(h) {
|
||||
autoHTTPSQualifiedHosts = append(autoHTTPSQualifiedHosts, h)
|
||||
}
|
||||
}
|
||||
|
||||
if len(tkns) == 2 && tkns[1].Text == "off" {
|
||||
// tls off: disable TLS (and automatic HTTPS) for server block's names
|
||||
if srv.AutoHTTPS == nil {
|
||||
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
|
||||
}
|
||||
srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, autoHTTPSQualifiedHosts...)
|
||||
} else {
|
||||
// tls connection policies
|
||||
cp, err := st.parseTLSConnPolicy(caddyfile.NewDispenser("Caddyfile", tkns))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO: are matchers needed if every hostname of the config is matched?
|
||||
cp.Matchers = map[string]json.RawMessage{
|
||||
"sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
|
||||
}
|
||||
srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
|
||||
}
|
||||
}
|
||||
|
||||
// set up each handler directive
|
||||
for _, dirBucket := range directiveBuckets() {
|
||||
for dir := range dirBucket {
|
||||
// keep in mind that multiple occurrences of the directive may appear here
|
||||
tkns, ok := sblock.Tokens[dir]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
					// extract matcher sets from matcher tokens, if any
					matcherSetsMap, err := st.tokensToMatcherSets(tkns, matcherDefs, warnings)
					if err != nil {
						return nil, err
					}

					mod, err := caddy.GetModule("http.handlers." + dir)
					if err != nil {
						return nil, fmt.Errorf("getting handler module '%s': %v", dir, err)
					}
|
||||
|
||||
// the tokens have been divided by matcher set for us,
|
||||
// so iterate each one and set them up
|
||||
for _, mst := range matcherSetsMap {
|
||||
unm, ok := mod.New().(caddyfile.Unmarshaler)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("handler module '%s' is not a Caddyfile unmarshaler", mod.Name)
|
||||
}
|
||||
err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(d.File(), mst.tokens))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
handler, ok := unm.(caddyhttp.MiddlewareHandler)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("handler module '%s' does not implement caddyhttp.MiddlewareHandler interface", mod.Name)
|
||||
}
|
||||
|
||||
route := caddyhttp.Route{
|
||||
Handle: []json.RawMessage{
|
||||
caddyconfig.JSONModuleObject(handler, "handler", dir, warnings),
|
||||
},
|
||||
}
|
||||
if mst.matcherSet != nil {
|
||||
route.MatcherSets = []map[string]json.RawMessage{mst.matcherSet}
|
||||
}
|
||||
handlerSubroute.Routes = append(handlerSubroute.Routes, route)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// redir: static responses that redirect
|
||||
if tkns, ok := sblock.Tokens["redir"]; ok {
|
||||
routes, err := st.parseRedir(tkns, matcherDefs, warnings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
handlerSubroute.Routes = append(handlerSubroute.Routes, routes...)
|
||||
}
|
||||
|
||||
// the route that contains the site's handlers will
|
||||
// be assumed to be the sub-route for this site...
|
||||
siteSubroute := handlerSubroute
|
||||
|
||||
// ... unless, of course, there are variables that might
|
||||
// be used by the site's matchers or handlers, in which
|
||||
// case we need to nest the handlers in a sub-sub-route,
|
||||
// and the variables go in the sub-route so the variables
|
||||
// get evaluated first
|
||||
if len(siteVarSubroute.Routes) > 0 {
|
||||
subSubRoute := caddyhttp.Subroute{Routes: siteSubroute.Routes}
|
||||
siteSubroute.Routes = append(
|
||||
siteVarSubroute.Routes,
|
||||
caddyhttp.Route{
|
||||
Handle: []json.RawMessage{
|
||||
caddyconfig.JSONModuleObject(subSubRoute, "handler", "subroute", warnings),
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
siteSubroute.Routes = consolidateRoutes(siteSubroute.Routes)
|
||||
|
||||
srv.Routes = append(srv.Routes, caddyhttp.Route{
|
||||
MatcherSets: matcherSetsEnc,
|
||||
Handle: []json.RawMessage{
|
||||
caddyconfig.JSONModuleObject(siteSubroute, "handler", "subroute", warnings),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
srv.Routes = consolidateRoutes(srv.Routes)
|
||||
|
||||
servers[fmt.Sprintf("srv%d", i)] = srv
|
||||
}
|
||||
|
||||
return servers, nil
|
||||
}
|
||||
|
||||
// consolidateRoutes combines routes with the same properties
|
||||
// (same matchers, same Terminal and Group settings) for a
|
||||
// cleaner overall output.
|
||||
func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
|
||||
for i := 0; i < len(routes)-1; i++ {
|
||||
if reflect.DeepEqual(routes[i].MatcherSets, routes[i+1].MatcherSets) &&
|
||||
routes[i].Terminal == routes[i+1].Terminal &&
|
||||
routes[i].Group == routes[i+1].Group {
|
||||
// keep the handlers in the same order, then splice out repetitive route
|
||||
routes[i].Handle = append(routes[i].Handle, routes[i+1].Handle...)
|
||||
routes = append(routes[:i+1], routes[i+2:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
return routes
|
||||
}
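A quick way to see the effect: assuming caddyhttp.RouteList is a slice of Route (as the indexing and appends above suggest), a package-internal test along these lines would expect two adjacent, otherwise-identical routes to collapse into one route carrying both handlers.

// Hypothetical package-internal test; not part of this commit.
package httpcaddyfile

import (
	"encoding/json"
	"testing"

	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func TestConsolidateRoutesMergesAdjacentEqualRoutes(t *testing.T) {
	routes := caddyhttp.RouteList{
		{Handle: []json.RawMessage{json.RawMessage(`{"handler":"headers"}`)}},
		{Handle: []json.RawMessage{json.RawMessage(`{"handler":"file_server"}`)}},
	}
	routes = consolidateRoutes(routes)
	if len(routes) != 1 || len(routes[0].Handle) != 2 {
		t.Fatalf("expected one merged route with two handlers, got %d routes", len(routes))
	}
}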
|
||||
|
||||
func (st *ServerType) tokensToMatcherSets(
|
||||
tkns []caddyfile.Token,
|
||||
matcherDefs map[string]map[string]json.RawMessage,
|
||||
warnings *[]caddyconfig.Warning,
|
||||
) (map[string]matcherSetAndTokens, error) {
|
||||
m := make(map[string]matcherSetAndTokens)
|
||||
|
||||
for len(tkns) > 0 {
|
||||
d := caddyfile.NewDispenser("Caddyfile", tkns)
|
||||
d.Next() // consume directive token
|
||||
|
||||
// look for matcher; it should be the next argument
|
||||
var matcherToken caddyfile.Token
|
||||
var matcherSet map[string]json.RawMessage
|
||||
if d.NextArg() {
|
||||
var ok bool
|
||||
var err error
|
||||
matcherSet, ok, err = st.matcherSetFromMatcherToken(d.Token(), matcherDefs, warnings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
// found a matcher; save it, then splice it out
|
||||
// since we don't want to parse it again
|
||||
matcherToken = d.Token()
|
||||
tkns = d.Delete()
|
||||
}
|
||||
d.RemainingArgs() // advance to end of line
|
||||
}
|
||||
for d.NextBlock() {
|
||||
// skip entire block including any nested blocks; all
|
||||
// we care about is accessing next directive occurrence
|
||||
for d.Nested() {
|
||||
d.NextBlock()
|
||||
}
|
||||
}
|
||||
end := d.Cursor() + 1
|
||||
m[matcherToken.Text] = matcherSetAndTokens{
|
||||
matcherSet: matcherSet,
|
||||
tokens: append(m[matcherToken.Text].tokens, tkns[:end]...),
|
||||
}
|
||||
tkns = tkns[end:]
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (st *ServerType) matcherSetFromMatcherToken(
|
||||
tkn caddyfile.Token,
|
||||
matcherDefs map[string]map[string]json.RawMessage,
|
||||
warnings *[]caddyconfig.Warning,
|
||||
) (map[string]json.RawMessage, bool, error) {
|
||||
// matcher tokens can be wildcards, simple path matchers,
|
||||
// or refer to a pre-defined matcher by some name
|
||||
if tkn.Text == "*" {
|
||||
// match all requests == no matchers, so nothing to do
|
||||
return nil, true, nil
|
||||
} else if strings.HasPrefix(tkn.Text, "/") {
|
||||
// convenient way to specify a single path match
|
||||
return map[string]json.RawMessage{
|
||||
"path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings),
|
||||
}, true, nil
|
||||
} else if strings.HasPrefix(tkn.Text, "match:") {
|
||||
// pre-defined matcher
|
||||
matcherName := strings.TrimPrefix(tkn.Text, "match:")
|
||||
m, ok := matcherDefs[matcherName]
|
||||
if !ok {
|
||||
return nil, false, fmt.Errorf("unrecognized matcher name: %+v", matcherName)
|
||||
}
|
||||
return m, true, nil
|
||||
}
|
||||
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
func (st *ServerType) compileEncodedMatcherSets(sblock caddyfile.ServerBlock) ([]map[string]json.RawMessage, error) {
|
||||
type hostPathPair struct {
|
||||
hostm caddyhttp.MatchHost
|
||||
pathm caddyhttp.MatchPath
|
||||
}
|
||||
|
||||
// keep routes with common host and path matchers together
|
||||
var matcherPairs []*hostPathPair
|
||||
|
||||
for _, key := range sblock.Keys {
|
||||
addr, err := standardizeAddress(key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("server block %v: parsing and standardizing address '%s': %v", sblock.Keys, key, err)
|
||||
}
|
||||
|
||||
// choose a matcher pair that should be shared by this
|
||||
// server block; if none exists yet, create one
|
||||
var chosenMatcherPair *hostPathPair
|
||||
for _, mp := range matcherPairs {
|
||||
if (len(mp.pathm) == 0 && addr.Path == "") ||
|
||||
(len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) {
|
||||
chosenMatcherPair = mp
|
||||
break
|
||||
}
|
||||
}
|
||||
if chosenMatcherPair == nil {
|
||||
chosenMatcherPair = new(hostPathPair)
|
||||
if addr.Path != "" {
|
||||
chosenMatcherPair.pathm = []string{addr.Path}
|
||||
}
|
||||
matcherPairs = append(matcherPairs, chosenMatcherPair)
|
||||
}
|
||||
|
||||
// add this server block's keys to the matcher
|
||||
// pair if it doesn't already exist
|
||||
if addr.Host != "" {
|
||||
var found bool
|
||||
for _, h := range chosenMatcherPair.hostm {
|
||||
if h == addr.Host {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// iterate each pairing of host and path matchers and
|
||||
// put them into a map for JSON encoding
|
||||
var matcherSets []map[string]caddyhttp.RequestMatcher
|
||||
for _, mp := range matcherPairs {
|
||||
matcherSet := make(map[string]caddyhttp.RequestMatcher)
|
||||
if len(mp.hostm) > 0 {
|
||||
matcherSet["host"] = mp.hostm
|
||||
}
|
||||
if len(mp.pathm) > 0 {
|
||||
matcherSet["path"] = mp.pathm
|
||||
}
|
||||
if len(matcherSet) > 0 {
|
||||
matcherSets = append(matcherSets, matcherSet)
|
||||
}
|
||||
}
|
||||
|
||||
// finally, encode each of the matcher sets
|
||||
var matcherSetsEnc []map[string]json.RawMessage
|
||||
for _, ms := range matcherSets {
|
||||
msEncoded, err := encodeMatcherSet(ms)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("server block %v: %v", sblock.Keys, err)
|
||||
}
|
||||
matcherSetsEnc = append(matcherSetsEnc, msEncoded)
|
||||
}
|
||||
|
||||
return matcherSetsEnc, nil
|
||||
}
|
||||
|
||||
func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (map[string]json.RawMessage, error) {
|
||||
msEncoded := make(map[string]json.RawMessage)
|
||||
for matcherName, val := range matchers {
|
||||
jsonBytes, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err)
|
||||
}
|
||||
msEncoded[matcherName] = jsonBytes
|
||||
}
|
||||
return msEncoded, nil
|
||||
}
|
||||
|
||||
// HandlerDirective implements a directive for an HTTP handler,
|
||||
// in that it can unmarshal its own configuration from Caddyfile
|
||||
// tokens and also specify which directive bucket it belongs in.
|
||||
type HandlerDirective interface {
|
||||
caddyfile.Unmarshaler
|
||||
Bucket() int
|
||||
}
|
||||
|
||||
// tryInt tries to convert str to an integer. If it fails, it downgrades
|
||||
// the error to a warning and returns 0.
|
||||
func tryInt(str string, warnings *[]caddyconfig.Warning) int {
|
||||
if str == "" {
|
||||
return 0
|
||||
}
|
||||
val, err := strconv.Atoi(str)
|
||||
if err != nil && warnings != nil {
|
||||
*warnings = append(*warnings, caddyconfig.Warning{Message: err.Error()})
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
type matcherSetAndTokens struct {
|
||||
matcherSet map[string]json.RawMessage
|
||||
tokens []caddyfile.Token
|
||||
}
|
||||
|
||||
// sbAddrAssocation is a mapping from a list of
|
||||
// addresses to a list of server blocks that are
|
||||
// served on those addresses.
|
||||
type sbAddrAssociation struct {
|
||||
addresses []string
|
||||
serverBlocks []caddyfile.ServerBlock
|
||||
}
|
||||
|
||||
// Interface guard
|
||||
var _ caddyfile.ServerType = (*ServerType)(nil)
|
|
@ -18,6 +18,7 @@ import (
|
|||
caddycmd "github.com/caddyserver/caddy/v2/cmd"
|
||||
|
||||
// this is where modules get plugged in
|
||||
_ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
|
||||
_ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
|
||||
_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
|
||||
_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode/brotli"
|
||||
|
|
|
@ -31,6 +31,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig"
|
||||
"github.com/mholt/certmagic"
|
||||
"github.com/mitchellh/go-ps"
|
||||
)
|
||||
|
@ -38,6 +39,7 @@ import (
|
|||
func cmdStart() (int, error) {
|
||||
startCmd := flag.NewFlagSet("start", flag.ExitOnError)
|
||||
startCmdConfigFlag := startCmd.String("config", "", "Configuration file")
|
||||
startCmdConfigAdapterFlag := startCmd.String("config-adapter", "", "Name of config adapter to apply")
|
||||
startCmd.Parse(os.Args[2:])
|
||||
|
||||
// open a listener to which the child process will connect when
|
||||
|
@ -62,6 +64,9 @@ func cmdStart() (int, error) {
|
|||
if *startCmdConfigFlag != "" {
|
||||
cmd.Args = append(cmd.Args, "--config", *startCmdConfigFlag)
|
||||
}
|
||||
if *startCmdConfigAdapterFlag != "" {
|
||||
cmd.Args = append(cmd.Args, "--config-adapter", *startCmdConfigAdapterFlag)
|
||||
}
|
||||
stdinpipe, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
return caddy.ExitCodeFailedStartup,
|
||||
|
@ -137,7 +142,8 @@ func cmdStart() (int, error) {
|
|||
func cmdRun() (int, error) {
|
||||
runCmd := flag.NewFlagSet("run", flag.ExitOnError)
|
||||
runCmdConfigFlag := runCmd.String("config", "", "Configuration file")
|
||||
runCmdPrintEnvFlag := runCmd.Bool("print-env", false, "Print environment (useful for debugging)")
|
||||
runCmdConfigAdapterFlag := runCmd.String("config-adapter", "", "Name of config adapter to apply")
|
||||
runCmdPrintEnvFlag := runCmd.Bool("print-env", false, "Print environment")
|
||||
runCmdPingbackFlag := runCmd.String("pingback", "", "Echo confirmation bytes to this address on success")
|
||||
runCmd.Parse(os.Args[2:])
|
||||
|
||||
|
@ -149,16 +155,10 @@ func cmdRun() (int, error) {
|
|||
}
|
||||
}
|
||||
|
||||
// if a config file was specified for bootstrapping
|
||||
// the server instance, load it now
|
||||
var config []byte
|
||||
if *runCmdConfigFlag != "" {
|
||||
var err error
|
||||
config, err = ioutil.ReadFile(*runCmdConfigFlag)
|
||||
if err != nil {
|
||||
return caddy.ExitCodeFailedStartup,
|
||||
fmt.Errorf("reading config file: %v", err)
|
||||
}
|
||||
// get the config in caddy's native format
|
||||
config, err := loadConfig(*runCmdConfigFlag, *runCmdConfigAdapterFlag)
|
||||
if err != nil {
|
||||
return caddy.ExitCodeFailedStartup, err
|
||||
}
|
||||
|
||||
// set a fitting User-Agent for ACME requests
|
||||
|
@ -167,7 +167,7 @@ func cmdRun() (int, error) {
|
|||
certmagic.UserAgent = "Caddy/" + cleanModVersion
|
||||
|
||||
// start the admin endpoint along with any initial config
|
||||
err := caddy.StartAdmin(config)
|
||||
err = caddy.StartAdmin(config)
|
||||
if err != nil {
|
||||
return caddy.ExitCodeFailedStartup,
|
||||
fmt.Errorf("starting caddy administration endpoint: %v", err)
|
||||
|
@ -226,6 +226,7 @@ func cmdStop() (int, error) {
|
|||
func cmdReload() (int, error) {
|
||||
reloadCmd := flag.NewFlagSet("load", flag.ExitOnError)
|
||||
reloadCmdConfigFlag := reloadCmd.String("config", "", "Configuration file")
|
||||
reloadCmdConfigAdapterFlag := reloadCmd.String("config-adapter", "", "Name of config adapter to apply")
|
||||
reloadCmdAddrFlag := reloadCmd.String("address", "", "Address of the administration listener, if different from config")
|
||||
reloadCmd.Parse(os.Args[2:])
|
||||
|
||||
|
@ -235,11 +236,10 @@ func cmdReload() (int, error) {
|
|||
fmt.Errorf("no configuration to load (use --config)")
|
||||
}
|
||||
|
||||
// load the configuration file
|
||||
config, err := ioutil.ReadFile(*reloadCmdConfigFlag)
|
||||
// get the config in caddy's native format
|
||||
config, err := loadConfig(*reloadCmdConfigFlag, *reloadCmdConfigAdapterFlag)
|
||||
if err != nil {
|
||||
return caddy.ExitCodeFailedStartup,
|
||||
fmt.Errorf("reading config file: %v", err)
|
||||
return caddy.ExitCodeFailedStartup, err
|
||||
}
|
||||
|
||||
// get the address of the admin listener and craft endpoint URL
|
||||
|
@ -306,3 +306,52 @@ func cmdEnviron() (int, error) {
|
|||
}
|
||||
return caddy.ExitCodeSuccess, nil
|
||||
}
|
||||
|
||||
func cmdAdaptConfig() (int, error) {
|
||||
adaptCmd := flag.NewFlagSet("adapt", flag.ExitOnError)
|
||||
adaptCmdAdapterFlag := adaptCmd.String("adapter", "", "Name of config adapter")
|
||||
adaptCmdInputFlag := adaptCmd.String("input", "", "Configuration file to adapt")
|
||||
adaptCmdPrettyFlag := adaptCmd.Bool("pretty", false, "Format the output for human readability")
|
||||
adaptCmd.Parse(os.Args[2:])
|
||||
|
||||
if *adaptCmdAdapterFlag == "" || *adaptCmdInputFlag == "" {
|
||||
return caddy.ExitCodeFailedStartup,
|
||||
fmt.Errorf("usage: caddy adapt-config --adapter <name> --input <file>")
|
||||
}
|
||||
|
||||
cfgAdapter := caddyconfig.GetAdapter(*adaptCmdAdapterFlag)
|
||||
if cfgAdapter == nil {
|
||||
return caddy.ExitCodeFailedStartup,
|
||||
fmt.Errorf("unrecognized config adapter: %s", *adaptCmdAdapterFlag)
|
||||
}
|
||||
|
||||
input, err := ioutil.ReadFile(*adaptCmdInputFlag)
|
||||
if err != nil {
|
||||
return caddy.ExitCodeFailedStartup,
|
||||
fmt.Errorf("reading input file: %v", err)
|
||||
}
|
||||
|
||||
opts := make(map[string]string)
|
||||
if *adaptCmdPrettyFlag {
|
||||
opts["pretty"] = "true"
|
||||
}
|
||||
|
||||
adaptedConfig, warnings, err := cfgAdapter.Adapt(input, opts)
|
||||
if err != nil {
|
||||
return caddy.ExitCodeFailedStartup, err
|
||||
}
|
||||
|
||||
// print warnings to stderr
|
||||
for _, warn := range warnings {
|
||||
msg := warn.Message
|
||||
if warn.Directive != "" {
|
||||
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
|
||||
}
|
||||
log.Printf("[WARNING][%s] %s:%d: %s", *adaptCmdAdapterFlag, warn.File, warn.Line, msg)
|
||||
}
|
||||
|
||||
// print result to stdout
|
||||
fmt.Println(string(adaptedConfig))
|
||||
|
||||
return caddy.ExitCodeSuccess, nil
|
||||
}
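The same adaptation can also be driven programmatically through the registered adapter. The following is only a sketch: the sample Caddyfile content (using the hard-coded "root" directive) and the blank-import path for the httpcaddyfile package are assumptions based on this change's package layout.

// Hypothetical sketch; not part of this commit.
package main

import (
	"fmt"
	"log"

	"github.com/caddyserver/caddy/v2/caddyconfig"
	// blank import assumed to register the "caddyfile" adapter via its init
	_ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
)

func main() {
	// a minimal sample Caddyfile
	input := []byte("localhost\n\nroot /var/www/html\n")

	adapter := caddyconfig.GetAdapter("caddyfile")
	if adapter == nil {
		log.Fatal("caddyfile adapter is not plugged in")
	}

	cfgJSON, warnings, err := adapter.Adapt(input, map[string]string{"pretty": "true"})
	if err != nil {
		log.Fatal(err)
	}
	for _, warn := range warnings {
		log.Printf("[WARNING] %s:%d: %s", warn.File, warn.Line, warn.Message)
	}
	fmt.Println(string(cfgJSON))
}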
|
||||
|
|
65
cmd/main.go
|
@ -25,6 +25,7 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig"
|
||||
)
|
||||
|
||||
// Main implements the main function of the caddy command.
|
||||
|
@ -62,6 +63,7 @@ var commands = map[string]commandFunc{
|
|||
"version": cmdVersion,
|
||||
"list-modules": cmdListModules,
|
||||
"environ": cmdEnviron,
|
||||
"adapt-config": cmdAdaptConfig,
|
||||
}
|
||||
|
||||
func usageString() string {
|
||||
|
@ -85,3 +87,66 @@ func handlePingbackConn(conn net.Conn, expect []byte) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadConfig loads the config from configFile and adapts it
|
||||
// using adapterName. If adapterName is specified, configFile
|
||||
// must be also. It prints any warnings to stderr, and returns
|
||||
// the resulting JSON config bytes.
|
||||
func loadConfig(configFile, adapterName string) ([]byte, error) {
|
||||
// specifying an adapter without a config file is ambiguous
|
||||
if configFile == "" && adapterName != "" {
|
||||
return nil, fmt.Errorf("cannot adapt config without config file (use --config)")
|
||||
}
|
||||
|
||||
// load initial config and adapter
|
||||
var config []byte
|
||||
var cfgAdapter caddyconfig.Adapter
|
||||
var err error
|
||||
if configFile != "" {
|
||||
config, err = ioutil.ReadFile(configFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading config file: %v", err)
|
||||
}
|
||||
} else if adapterName == "" {
|
||||
// as a special case when no config file or adapter
|
||||
// is specified, see if the Caddyfile adapter is
|
||||
// plugged in, and if so, try using a default Caddyfile
|
||||
cfgAdapter = caddyconfig.GetAdapter("caddyfile")
|
||||
if cfgAdapter != nil {
|
||||
config, err = ioutil.ReadFile("Caddyfile")
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("reading default Caddyfile: %v", err)
|
||||
}
|
||||
configFile = "Caddyfile"
|
||||
}
|
||||
}
|
||||
|
||||
// load config adapter
|
||||
if adapterName != "" {
|
||||
cfgAdapter = caddyconfig.GetAdapter(adapterName)
|
||||
if cfgAdapter == nil {
|
||||
return nil, fmt.Errorf("unrecognized config adapter: %s", adapterName)
|
||||
}
|
||||
}
|
||||
|
||||
// adapt config
|
||||
if cfgAdapter != nil {
|
||||
adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]string{
|
||||
"filename": configFile,
|
||||
// TODO: all other options... (http-port, etc...)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("adapting config using %s: %v", adapterName, err)
|
||||
}
|
||||
for _, warn := range warnings {
|
||||
msg := warn.Message
|
||||
if warn.Directive != "" {
|
||||
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
|
||||
}
|
||||
fmt.Printf("[WARNING][%s] %s:%d: %s", adapterName, warn.File, warn.Line, msg)
|
||||
}
|
||||
config = adaptedConfig
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
|
20
modules.go
|
@ -38,6 +38,26 @@ type Module struct {
|
|||
New func() interface{}
|
||||
}
|
||||
|
||||
// ID returns a module's ID, which is the
|
||||
// last element of its name.
|
||||
func (m Module) ID() string {
|
||||
if m.Name == "" {
|
||||
return ""
|
||||
}
|
||||
parts := strings.Split(m.Name, ".")
|
||||
return parts[len(parts)-1]
|
||||
}
|
||||
|
||||
// Namespace returns the module's namespace (scope)
|
||||
// which is all but the last element of its name.
|
||||
func (m Module) Namespace() string {
|
||||
lastDot := strings.LastIndex(m.Name, ".")
|
||||
if lastDot < 0 {
|
||||
return ""
|
||||
}
|
||||
return m.Name[:lastDot]
|
||||
}
|
||||
|
||||
func (m Module) String() string { return m.Name }
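A brief, hypothetical illustration of the two new helpers:

// Hypothetical snippet; not part of the diff.
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2"
)

func main() {
	m := caddy.Module{Name: "http.handlers.file_server"}
	fmt.Println(m.ID())        // file_server
	fmt.Println(m.Namespace()) // http.handlers
}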
|
||||
|
||||
// RegisterModule registers a module. Modules must call
|
||||
|
|
|
@ -15,9 +15,12 @@
|
|||
package caddyhttp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
weakrand "math/rand"
|
||||
"net"
|
||||
|
@ -244,6 +247,14 @@ func (app *App) automaticHTTPS() error {
|
|||
for d := range domainSet {
|
||||
domains = append(domains, d)
|
||||
if !srv.AutoHTTPS.Skipped(d, srv.AutoHTTPS.SkipCerts) {
|
||||
// if a certificate for this name is already loaded,
|
||||
// don't obtain another one for it, unless we are
|
||||
// supposed to ignore loaded certificates
|
||||
if !srv.AutoHTTPS.IgnoreLoadedCerts &&
|
||||
len(tlsApp.CertificatesWithSAN(d)) > 0 {
|
||||
log.Printf("[INFO][%s] Skipping automatic certificate management because a certificate with that SAN is already loaded", d)
|
||||
continue
|
||||
}
|
||||
domainsForCerts = append(domainsForCerts, d)
|
||||
}
|
||||
}
|
||||
|
@ -319,7 +330,7 @@ func (app *App) automaticHTTPS() error {
|
|||
}
|
||||
redirTo += "{http.request.uri}"
|
||||
|
||||
redirRoutes = append(redirRoutes, ServerRoute{
|
||||
redirRoutes = append(redirRoutes, Route{
|
||||
matcherSets: []MatcherSet{
|
||||
{
|
||||
MatchProtocol("http"),
|
||||
|
@ -328,7 +339,7 @@ func (app *App) automaticHTTPS() error {
|
|||
},
|
||||
handlers: []MiddlewareHandler{
|
||||
StaticResponse{
|
||||
StatusCode: weakString(strconv.Itoa(http.StatusTemporaryRedirect)), // TODO: use permanent redirect instead
|
||||
StatusCode: WeakString(strconv.Itoa(http.StatusTemporaryRedirect)), // TODO: use permanent redirect instead
|
||||
Headers: http.Header{
|
||||
"Location": []string{redirTo},
|
||||
"Connection": []string{"close"},
|
||||
|
@ -431,6 +442,77 @@ type MiddlewareHandler interface {
|
|||
// emptyHandler is used as a no-op handler.
|
||||
var emptyHandler HandlerFunc = func(http.ResponseWriter, *http.Request) error { return nil }
|
||||
|
||||
// WeakString is a type that unmarshals any JSON value
|
||||
// as a string literal, with the following exceptions:
|
||||
// 1) actual string values are decoded as strings, and
|
||||
// 2) null is decoded as empty string
|
||||
// and provides methods for getting the value as various
|
||||
// primitive types. However, using this type removes any
|
||||
// type safety as far as deserializing JSON is concerned.
|
||||
type WeakString string
|
||||
|
||||
// UnmarshalJSON satisfies json.Unmarshaler according to
|
||||
// this type's documentation.
|
||||
func (ws *WeakString) UnmarshalJSON(b []byte) error {
|
||||
if len(b) == 0 {
|
||||
return io.EOF
|
||||
}
|
||||
if b[0] == byte('"') && b[len(b)-1] == byte('"') {
|
||||
var s string
|
||||
err := json.Unmarshal(b, &s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*ws = WeakString(s)
|
||||
return nil
|
||||
}
|
||||
if bytes.Equal(b, []byte("null")) {
|
||||
return nil
|
||||
}
|
||||
*ws = WeakString(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON marshals ws as a boolean if true or false,
|
||||
// a number if an integer, or a string otherwise.
|
||||
func (ws WeakString) MarshalJSON() ([]byte, error) {
|
||||
if ws == "true" {
|
||||
return []byte("true"), nil
|
||||
}
|
||||
if ws == "false" {
|
||||
return []byte("false"), nil
|
||||
}
|
||||
if num, err := strconv.Atoi(string(ws)); err == nil {
|
||||
return json.Marshal(num)
|
||||
}
|
||||
return json.Marshal(string(ws))
|
||||
}
|
||||
|
||||
// Int returns ws as an integer. If ws is not an
|
||||
// integer, 0 is returned.
|
||||
func (ws WeakString) Int() int {
|
||||
num, _ := strconv.Atoi(string(ws))
|
||||
return num
|
||||
}
|
||||
|
||||
// Float64 returns ws as a float64. If ws is not a
|
||||
// float value, the zero value is returned.
|
||||
func (ws WeakString) Float64() float64 {
|
||||
num, _ := strconv.ParseFloat(string(ws), 64)
|
||||
return num
|
||||
}
|
||||
|
||||
// Bool returns ws as a boolean. If ws is not a
|
||||
// boolean, false is returned.
|
||||
func (ws WeakString) Bool() bool {
|
||||
return string(ws) == "true"
|
||||
}
|
||||
|
||||
// String returns ws as a string.
|
||||
func (ws WeakString) String() string {
|
||||
return string(ws)
|
||||
}
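To illustrate why handler configs use this type, both of the JSON documents below decode to the same value; the listenConfig struct is hypothetical.

// Hypothetical snippet; not part of this commit.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// listenConfig is an invented struct for demonstration only.
type listenConfig struct {
	Port caddyhttp.WeakString `json:"port,omitempty"`
}

func main() {
	var a, b listenConfig
	_ = json.Unmarshal([]byte(`{"port": 8080}`), &a)   // JSON number
	_ = json.Unmarshal([]byte(`{"port": "8080"}`), &b) // JSON string
	fmt.Println(a.Port.Int(), b.Port.Int())            // 8080 8080
}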
|
||||
|
||||
const (
|
||||
// DefaultHTTPPort is the default port for HTTP.
|
||||
DefaultHTTPPort = 80
|
||||
|
|
|
@ -16,8 +16,10 @@ package caddybrotli
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/andybalholm/brotli"
|
||||
"github.com/caddyserver/caddy/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
|
||||
)
|
||||
|
@ -35,6 +37,22 @@ type Brotli struct {
|
|||
Quality *int `json:"quality,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens.
|
||||
func (b *Brotli) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
for d.Next() {
|
||||
if !d.NextArg() {
|
||||
continue
|
||||
}
|
||||
qualityStr := d.Val()
|
||||
quality, err := strconv.Atoi(qualityStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Quality = &quality
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate validates b's configuration.
|
||||
func (b Brotli) Validate() error {
|
||||
if b.Quality != nil {
|
||||
|
@ -64,6 +82,7 @@ func (b Brotli) NewEncoder() encode.Encoder {
|
|||
|
||||
// Interface guards
|
||||
var (
|
||||
_ encode.Encoding = (*Brotli)(nil)
|
||||
_ caddy.Validator = (*Brotli)(nil)
|
||||
_ encode.Encoding = (*Brotli)(nil)
|
||||
_ caddy.Validator = (*Brotli)(nil)
|
||||
_ caddyfile.Unmarshaler = (*Brotli)(nil)
|
||||
)
|
||||
|
|
85
modules/caddyhttp/encode/caddyfile.go
Normal file
|
@ -0,0 +1,85 @@
|
|||
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package encode
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
"github.com/caddyserver/caddy/caddyconfig"
|
||||
"github.com/caddyserver/caddy/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/caddyconfig/httpcaddyfile"
|
||||
)
|
||||
|
||||
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
|
||||
//
|
||||
// encode [<matcher>] <formats...> {
|
||||
// gzip [<level>]
|
||||
// zstd
|
||||
// brotli [<quality>]
|
||||
// }
|
||||
//
|
||||
// Specifying the formats on the first line will use those formats' defaults.
|
||||
func (enc *Encode) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
for d.Next() {
|
||||
for _, arg := range d.RemainingArgs() {
|
||||
mod, err := caddy.GetModule("http.encoders." + arg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding encoder module '%s': %v", mod.Name, err)
|
||||
}
|
||||
encoding, ok := mod.New().(Encoding)
|
||||
if !ok {
|
||||
return fmt.Errorf("module %s is not an HTTP encoding", mod.Name)
|
||||
}
|
||||
if enc.EncodingsRaw == nil {
|
||||
enc.EncodingsRaw = make(map[string]json.RawMessage)
|
||||
}
|
||||
enc.EncodingsRaw[arg] = caddyconfig.JSON(encoding, nil)
|
||||
}
|
||||
|
||||
for d.NextBlock() {
|
||||
name := d.Val()
|
||||
mod, err := caddy.GetModule("http.encoders." + name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting encoder module '%s': %v", mod.Name, err)
|
||||
}
|
||||
unm, ok := mod.New().(caddyfile.Unmarshaler)
|
||||
if !ok {
|
||||
return fmt.Errorf("encoder module '%s' is not a Caddyfile unmarshaler", mod.Name)
|
||||
}
|
||||
err = unm.UnmarshalCaddyfile(d.NewFromNextTokens())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encoding, ok := unm.(Encoding)
|
||||
if !ok {
|
||||
return fmt.Errorf("module %s is not an HTTP encoding", mod.Name)
|
||||
}
|
||||
if enc.EncodingsRaw == nil {
|
||||
enc.EncodingsRaw = make(map[string]json.RawMessage)
|
||||
}
|
||||
enc.EncodingsRaw[name] = caddyconfig.JSON(encoding, nil)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Bucket returns the HTTP Caddyfile handler bucket number.
|
||||
func (enc Encode) Bucket() int { return 3 }
|
||||
|
||||
// Interface guard
|
||||
var _ httpcaddyfile.HandlerDirective = (*Encode)(nil)
|
|
@ -52,19 +52,15 @@ type Encode struct {
|
|||
|
||||
// Provision provisions enc.
|
||||
func (enc *Encode) Provision(ctx caddy.Context) error {
|
||||
enc.writerPools = make(map[string]*sync.Pool)
|
||||
|
||||
for modName, rawMsg := range enc.EncodingsRaw {
|
||||
val, err := ctx.LoadModule("http.encoders."+modName, rawMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading encoder module '%s': %v", modName, err)
|
||||
}
|
||||
encoder := val.(Encoding)
|
||||
|
||||
enc.writerPools[encoder.AcceptEncoding()] = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return encoder.NewEncoder()
|
||||
},
|
||||
encoding := val.(Encoding)
|
||||
err = enc.addEncoding(encoding)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
enc.EncodingsRaw = nil // allow GC to deallocate - TODO: Does this help?
|
||||
|
@ -85,10 +81,28 @@ func (enc *Encode) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyh
|
|||
defer w.(*responseWriter).Close()
|
||||
break
|
||||
}
|
||||
|
||||
return next.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func (enc *Encode) addEncoding(e Encoding) error {
|
||||
ae := e.AcceptEncoding()
|
||||
if ae == "" {
|
||||
return fmt.Errorf("encoder does not specify an Accept-Encoding value")
|
||||
}
|
||||
if _, ok := enc.writerPools[ae]; ok {
|
||||
return fmt.Errorf("encoder already added: %s", ae)
|
||||
}
|
||||
if enc.writerPools == nil {
|
||||
enc.writerPools = make(map[string]*sync.Pool)
|
||||
}
|
||||
enc.writerPools[ae] = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return e.NewEncoder()
|
||||
},
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// openResponseWriter creates a new response writer that may (or may not)
|
||||
// encode the response with encodingName. The returned response writer MUST
|
||||
// be closed after the handler completes.
|
||||
|
|
|
@ -18,7 +18,9 @@ import (
|
|||
"compress/flate"
|
||||
"compress/gzip" // TODO: consider using https://github.com/klauspost/compress/gzip
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/caddyserver/caddy/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
|
||||
)
|
||||
|
@ -35,6 +37,22 @@ type Gzip struct {
|
|||
Level int `json:"level,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens.
|
||||
func (g *Gzip) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
for d.Next() {
|
||||
if !d.NextArg() {
|
||||
continue
|
||||
}
|
||||
levelStr := d.Val()
|
||||
level, err := strconv.Atoi(levelStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
g.Level = level
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Provision provisions g's configuration.
|
||||
func (g *Gzip) Provision(ctx caddy.Context) error {
|
||||
if g.Level == 0 {
|
||||
|
@ -69,7 +87,8 @@ var defaultGzipLevel = 5
|
|||
|
||||
// Interface guards
|
||||
var (
|
||||
_ encode.Encoding = (*Gzip)(nil)
|
||||
_ caddy.Provisioner = (*Gzip)(nil)
|
||||
_ caddy.Validator = (*Gzip)(nil)
|
||||
_ encode.Encoding = (*Gzip)(nil)
|
||||
_ caddy.Provisioner = (*Gzip)(nil)
|
||||
_ caddy.Validator = (*Gzip)(nil)
|
||||
_ caddyfile.Unmarshaler = (*Gzip)(nil)
|
||||
)
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
package caddyzstd
|
||||
|
||||
import (
|
||||
"github.com/caddyserver/caddy/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
|
@ -30,6 +31,11 @@ func init() {
|
|||
// Zstd can create Zstandard encoders.
|
||||
type Zstd struct{}
|
||||
|
||||
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens.
|
||||
func (z *Zstd) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AcceptEncoding returns the name of the encoding as
|
||||
// used in the Accept-Encoding request headers.
|
||||
func (Zstd) AcceptEncoding() string { return "zstd" }
|
||||
|
@ -40,5 +46,8 @@ func (z Zstd) NewEncoder() encode.Encoder {
|
|||
return writer
|
||||
}
|
||||
|
||||
// Interface guard
|
||||
var _ encode.Encoding = (*Zstd)(nil)
|
||||
// Interface guards
|
||||
var (
|
||||
_ encode.Encoding = (*Zstd)(nil)
|
||||
_ caddyfile.Unmarshaler = (*Zstd)(nil)
|
||||
)
|
||||
|
|
87
modules/caddyhttp/fileserver/caddyfile.go
Normal file
|
@ -0,0 +1,87 @@
|
|||
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fileserver
|
||||
|
||||
import (
|
||||
"github.com/caddyserver/caddy/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/caddyconfig/httpcaddyfile"
|
||||
)
|
||||
|
||||
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
|
||||
//
|
||||
// file_server [<matcher>] [browse] {
|
||||
// hide <files...>
|
||||
// index <files...>
|
||||
// browse [<template_file>]
|
||||
// root <path>
|
||||
// }
|
||||
//
|
||||
// If browse is given on the first line, it can't be used in the block also.
|
||||
// The default root is the one given by the root directive.
|
||||
func (fsrv *FileServer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
for d.Next() {
|
||||
args := d.RemainingArgs()
|
||||
switch len(args) {
|
||||
case 0:
|
||||
case 1:
|
||||
if args[0] != "browse" {
|
||||
return d.ArgErr()
|
||||
}
|
||||
fsrv.Browse = new(Browse)
|
||||
default:
|
||||
return d.ArgErr()
|
||||
}
|
||||
|
||||
for d.NextBlock() {
|
||||
switch d.Val() {
|
||||
case "hide":
|
||||
fsrv.Hide = d.RemainingArgs()
|
||||
if len(fsrv.Hide) == 0 {
|
||||
return d.ArgErr()
|
||||
}
|
||||
case "index":
|
||||
fsrv.IndexNames = d.RemainingArgs()
|
||||
if len(fsrv.IndexNames) == 0 {
|
||||
return d.ArgErr()
|
||||
}
|
||||
case "root":
|
||||
if !d.Args(&fsrv.Root) {
|
||||
return d.ArgErr()
|
||||
}
|
||||
case "browse":
|
||||
if fsrv.Browse != nil {
|
||||
return d.Err("browsing is already configured")
|
||||
}
|
||||
fsrv.Browse = new(Browse)
|
||||
d.Args(&fsrv.Browse.TemplateFile)
|
||||
default:
|
||||
return d.Errf("unknown subdirective '%s'", d.Val())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if no root was configured explicitly, use site root
|
||||
if fsrv.Root == "" {
|
||||
fsrv.Root = "{http.var.root}"
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Bucket returns the HTTP Caddyfile handler bucket number.
|
||||
func (fsrv FileServer) Bucket() int { return 7 }
|
||||
|
||||
// Interface guard
|
||||
var _ httpcaddyfile.HandlerDirective = (*FileServer)(nil)
|
|
@ -20,6 +20,7 @@ import (
|
|||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/caddyserver/caddy/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
|
||||
)
|
||||
|
@ -51,6 +52,39 @@ type MatchFile struct {
|
|||
TryPolicy string `json:"try_policy,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile sets up the matcher from Caddyfile tokens. Syntax:
|
||||
//
|
||||
// file {
|
||||
// root <path>
|
||||
// try_files <files...>
|
||||
// try_policy <first_exist|smallest_size|largest_size|most_recent_modified>
|
||||
// }
|
||||
//
|
||||
func (m *MatchFile) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
for d.Next() {
|
||||
for d.NextBlock() {
|
||||
switch d.Val() {
|
||||
case "root":
|
||||
if !d.NextArg() {
|
||||
return d.ArgErr()
|
||||
}
|
||||
m.Root = d.Val()
|
||||
case "try_files":
|
||||
m.TryFiles = d.RemainingArgs()
|
||||
if len(m.TryFiles) == 0 {
|
||||
return d.ArgErr()
|
||||
}
|
||||
case "try_policy":
|
||||
if !d.NextArg() {
|
||||
return d.ArgErr()
|
||||
}
|
||||
m.TryPolicy = d.Val()
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate ensures m has a valid configuration.
|
||||
func (m MatchFile) Validate() error {
|
||||
switch m.TryPolicy {
|
||||
|
|
|
@ -48,8 +48,6 @@ type FileServer struct {
|
|||
Hide []string `json:"hide,omitempty"`
|
||||
IndexNames []string `json:"index_names,omitempty"`
|
||||
Browse *Browse `json:"browse,omitempty"`
|
||||
|
||||
// TODO: Content negotiation
|
||||
}
|
||||
|
||||
// Provision sets up the static files responder.
|
||||
|
@ -83,7 +81,7 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, _ cadd
|
|||
|
||||
filesToHide := fsrv.transformHidePaths(repl)
|
||||
|
||||
root := repl.ReplaceAll(fsrv.Root, "")
|
||||
root := repl.ReplaceAll(fsrv.Root, ".")
|
||||
suffix := repl.ReplaceAll(r.URL.Path, "")
|
||||
filename := sanitizedPathJoin(root, suffix)
|
||||
|
||||
|
@ -302,7 +300,7 @@ func calculateEtag(d os.FileInfo) string {
|
|||
return `"` + t + s + `"`
|
||||
}
|
||||
|
||||
var defaultIndexNames = []string{"index.html"}
|
||||
var defaultIndexNames = []string{"index.html", "index.txt"}
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
|
|
92
modules/caddyhttp/headers/caddyfile.go
Normal file
|
@ -0,0 +1,92 @@
|
|||
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package headers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/caddyserver/caddy/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/caddyconfig/httpcaddyfile"
|
||||
)
|
||||
|
||||
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
|
||||
//
|
||||
// headers [<matcher>] [[+|-]<field> <value>] {
|
||||
// [+][<field>] [<value>]
|
||||
// [-<field>]
|
||||
// }
|
||||
//
|
||||
// Either a block can be opened or a single header field can be configured
|
||||
// in the first line, but not both in the same directive.
|
||||
func (h *Headers) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
for d.Next() {
|
||||
// first see if headers are in the initial line
|
||||
var hasArgs bool
|
||||
if d.NextArg() {
|
||||
hasArgs = true
|
||||
field := d.Val()
|
||||
d.NextArg()
|
||||
value := d.Val()
|
||||
h.processCaddyfileLine(field, value)
|
||||
}
|
||||
|
||||
// if not, they should be in a block
|
||||
for d.NextBlock() {
|
||||
if hasArgs {
|
||||
return d.Err("cannot specify headers in both arguments and block")
|
||||
}
|
||||
field := d.Val()
|
||||
var value string
|
||||
if d.NextArg() {
|
||||
value = d.Val()
|
||||
}
|
||||
h.processCaddyfileLine(field, value)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Headers) processCaddyfileLine(field, value string) {
|
||||
if strings.HasPrefix(field, "+") {
|
||||
if h.Response == nil {
|
||||
h.Response = &RespHeaderOps{HeaderOps: new(HeaderOps)}
|
||||
}
|
||||
if h.Response.Add == nil {
|
||||
h.Response.Add = make(http.Header)
|
||||
}
|
||||
h.Response.Add.Set(field[1:], value)
|
||||
} else if strings.HasPrefix(field, "-") {
|
||||
if h.Response == nil {
|
||||
h.Response = &RespHeaderOps{HeaderOps: new(HeaderOps)}
|
||||
}
|
||||
h.Response.Delete = append(h.Response.Delete, field[1:])
|
||||
h.Response.Deferred = true
|
||||
} else {
|
||||
if h.Response == nil {
|
||||
h.Response = &RespHeaderOps{HeaderOps: new(HeaderOps)}
|
||||
}
|
||||
if h.Response.Set == nil {
|
||||
h.Response.Set = make(http.Header)
|
||||
}
|
||||
h.Response.Set.Set(field, value)
|
||||
}
|
||||
}
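A small package-internal test could pin down the three field prefixes; this is a sketch that assumes only the RespHeaderOps fields referenced above.

// Hypothetical package-internal test; not part of this commit.
package headers

import "testing"

func TestProcessCaddyfileLine(t *testing.T) {
	var h Headers
	h.processCaddyfileLine("+Vary", "Accept-Encoding") // add
	h.processCaddyfileLine("-Server", "")              // delete (deferred)
	h.processCaddyfileLine("X-Frame-Options", "DENY")  // set
	if h.Response.Add.Get("Vary") != "Accept-Encoding" ||
		h.Response.Set.Get("X-Frame-Options") != "DENY" ||
		len(h.Response.Delete) != 1 || !h.Response.Deferred {
		t.Fatalf("unexpected header operations: %+v", h.Response)
	}
}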
|
||||
|
||||
// Bucket returns the HTTP Caddyfile handler bucket number.
|
||||
func (h Headers) Bucket() int { return 3 }
|
||||
|
||||
// Interface guard
|
||||
var _ httpcaddyfile.HandlerDirective = (*Headers)(nil)
|
|
@ -28,6 +28,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
|
||||
"github.com/caddyserver/caddy/v2/pkg/caddyscript"
|
||||
"go.starlark.net/starlark"
|
||||
)
|
||||
|
@ -125,6 +126,12 @@ func init() {
|
|||
})
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||
func (m *MatchHost) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
*m = d.RemainingArgs()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Match returns true if r matches m.
|
||||
func (m MatchHost) Match(r *http.Request) bool {
|
||||
reqHost, _, err := net.SplitHostPort(r.Host)
|
||||
|
@ -177,12 +184,24 @@ func (m MatchPath) Match(r *http.Request) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||
func (m *MatchPath) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
*m = d.RemainingArgs()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Match returns true if r matches m.
|
||||
func (m MatchPathRE) Match(r *http.Request) bool {
|
||||
repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)
|
||||
return m.MatchRegexp.Match(r.URL.Path, repl, "path_regexp")
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||
func (m *MatchMethod) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
*m = d.RemainingArgs()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Match returns true if r matches m.
|
||||
func (m MatchMethod) Match(r *http.Request) bool {
|
||||
for _, method := range m {
|
||||
|
@ -193,6 +212,18 @@ func (m MatchMethod) Match(r *http.Request) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||
func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
for d.Next() {
|
||||
parts := strings.SplitN(d.Val(), "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val())
|
||||
}
|
||||
url.Values(*m).Set(parts[0], parts[1])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Match returns true if r matches m.
|
||||
func (m MatchQuery) Match(r *http.Request) bool {
|
||||
for param, vals := range m {
|
||||
|
@ -206,6 +237,18 @@ func (m MatchQuery) Match(r *http.Request) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||
func (m *MatchHeader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
for d.Next() {
|
||||
var field, val string
|
||||
if !d.Args(&field, &val) {
|
||||
return d.Errf("expected both field and value")
|
||||
}
|
||||
http.Header(*m).Set(field, val)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Match returns true if r matches m.
|
||||
func (m MatchHeader) Match(r *http.Request) bool {
|
||||
for field, allowedFieldVals := range m {
|
||||
|
@ -227,6 +270,21 @@ func (m MatchHeader) Match(r *http.Request) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||
func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
if *m == nil {
|
||||
*m = make(map[string]*MatchRegexp)
|
||||
}
|
||||
for d.Next() {
|
||||
var field, val string
|
||||
if !d.Args(&field, &val) {
|
||||
return d.ArgErr()
|
||||
}
|
||||
(*m)[field] = &MatchRegexp{Pattern: val}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Match returns true if r matches m.
|
||||
func (m MatchHeaderRE) Match(r *http.Request) bool {
|
||||
for field, rm := range m {
|
||||
|
@ -274,6 +332,16 @@ func (m MatchProtocol) Match(r *http.Request) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||
func (m *MatchProtocol) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
var proto string
|
||||
if !d.Args(&proto) {
|
||||
return d.Err("expected exactly one protocol")
|
||||
}
|
||||
*m = MatchProtocol(proto)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals data into m's unexported map field.
|
||||
// This is done because we cannot embed the map directly into
|
||||
// the struct, but we need a struct because we need another
|
||||
|
@ -282,6 +350,12 @@ func (m *MatchNegate) UnmarshalJSON(data []byte) error {
|
|||
return json.Unmarshal(data, &m.matchersRaw)
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||
func (m *MatchNegate) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
// TODO: figure out how this will work
|
||||
return nil
|
||||
}
|
||||
|
||||
// Provision loads the matcher modules to be negated.
|
||||
func (m *MatchNegate) Provision(ctx caddy.Context) error {
|
||||
for modName, rawMsg := range m.matchersRaw {
|
||||
|
@ -301,6 +375,12 @@ func (m MatchNegate) Match(r *http.Request) bool {
|
|||
return !m.matchers.Match(r)
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||
func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
m.Ranges = d.RemainingArgs()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Provision parses m's IP ranges, either from IP or CIDR expressions.
|
||||
func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
|
||||
for _, str := range m.Ranges {
|
||||
|
@ -379,7 +459,7 @@ func (m MatchStarlarkExpr) Match(r *http.Request) bool {
|
|||
// MatchRegexp is an embeddable type for matching
|
||||
// using regular expressions.
|
||||
type MatchRegexp struct {
|
||||
Name string `json:"name"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Pattern string `json:"pattern"`
|
||||
compiled *regexp.Regexp
|
||||
}
|
||||
|
@ -431,6 +511,14 @@ func (mre *MatchRegexp) Match(input string, repl caddy.Replacer, scope string) b
|
|||
return true
|
||||
}
|
||||
|
||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||
func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
if !d.Args(&mre.Name, &mre.Pattern) {
|
||||
return fmt.Errorf("missing arguments")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResponseMatcher is a type which can determine if a given response
|
||||
// status code and its headers match some criteria.
|
||||
type ResponseMatcher struct {
|
||||
|
@ -506,4 +594,14 @@ var (
|
|||
_ caddy.Provisioner = (*MatchNegate)(nil)
|
||||
_ RequestMatcher = (*MatchStarlarkExpr)(nil)
|
||||
_ caddy.Provisioner = (*MatchRegexp)(nil)
|
||||
|
||||
_ caddyfile.Unmarshaler = (*MatchHost)(nil)
|
||||
_ caddyfile.Unmarshaler = (*MatchPath)(nil)
|
||||
_ caddyfile.Unmarshaler = (*MatchPathRE)(nil)
|
||||
_ caddyfile.Unmarshaler = (*MatchMethod)(nil)
|
||||
_ caddyfile.Unmarshaler = (*MatchQuery)(nil)
|
||||
_ caddyfile.Unmarshaler = (*MatchHeader)(nil)
|
||||
_ caddyfile.Unmarshaler = (*MatchHeaderRE)(nil)
|
||||
_ caddyfile.Unmarshaler = (*MatchProtocol)(nil)
|
||||
_ caddyfile.Unmarshaler = (*MatchRemoteIP)(nil)
|
||||
)
|
||||
|
|
|
@ -15,6 +15,7 @@
package caddyhttp

import (
    "fmt"
    "net"
    "net/http"
    "net/textproto"

@ -28,6 +29,7 @@ import (
func addHTTPVarsToReplacer(repl caddy.Replacer, req *http.Request, w http.ResponseWriter) {
    httpVars := func(key string) (string, bool) {
        if req != nil {
            // query string parameters
            if strings.HasPrefix(key, queryReplPrefix) {
                vals := req.URL.Query()[key[len(queryReplPrefix):]]
                // always return true, since the query param might

@ -35,6 +37,7 @@ func addHTTPVarsToReplacer(repl caddy.Replacer, req *http.Request, w http.Respon
                return strings.Join(vals, ","), true
            }

            // request header fields
            if strings.HasPrefix(key, reqHeaderReplPrefix) {
                field := key[len(reqHeaderReplPrefix):]
                vals := req.Header[textproto.CanonicalMIMEHeaderKey(field)]

@ -43,6 +46,7 @@ func addHTTPVarsToReplacer(repl caddy.Replacer, req *http.Request, w http.Respon
                return strings.Join(vals, ","), true
            }

            // cookies
            if strings.HasPrefix(key, cookieReplPrefix) {
                name := key[len(cookieReplPrefix):]
                for _, cookie := range req.Cookies() {

@ -87,14 +91,7 @@ func addHTTPVarsToReplacer(repl caddy.Replacer, req *http.Request, w http.Respon
                return req.URL.RawQuery, true
            }

            if strings.HasPrefix(key, respHeaderReplPrefix) {
                field := key[len(respHeaderReplPrefix):]
                vals := w.Header()[textproto.CanonicalMIMEHeaderKey(field)]
                // always return true, since the header field might
                // be present only in some requests
                return strings.Join(vals, ","), true
            }

            // hostname labels
            if strings.HasPrefix(key, hostLabelReplPrefix) {
                idxStr := key[len(hostLabelReplPrefix):]
                idx, err := strconv.Atoi(idxStr)

@ -111,6 +108,7 @@ func addHTTPVarsToReplacer(repl caddy.Replacer, req *http.Request, w http.Respon
                return hostLabels[idx], true
            }

            // path parts
            if strings.HasPrefix(key, pathPartsReplPrefix) {
                idxStr := key[len(pathPartsReplPrefix):]
                idx, err := strconv.Atoi(idxStr)

@ -129,9 +127,31 @@ func addHTTPVarsToReplacer(repl caddy.Replacer, req *http.Request, w http.Respon
                }
                return pathParts[idx], true
            }

            // middleware variables
            if strings.HasPrefix(key, varsReplPrefix) {
                varName := key[len(varsReplPrefix):]
                tbl := req.Context().Value(VarCtxKey).(map[string]interface{})
                raw, ok := tbl[varName]
                if !ok {
                    // variables can be dynamic, so always return true
                    // even when it may not be set; treat as empty
                    return "", true
                }
                // do our best to convert it to a string efficiently
                switch val := raw.(type) {
                case string:
                    return val, true
                case fmt.Stringer:
                    return val.String(), true
                default:
                    return fmt.Sprintf("%s", val), true
                }
            }
        }

        if w != nil {
            // response header fields
            if strings.HasPrefix(key, respHeaderReplPrefix) {
                field := key[len(respHeaderReplPrefix):]
                vals := w.Header()[textproto.CanonicalMIMEHeaderKey(field)]

@ -153,5 +173,6 @@ const (
    cookieReplPrefix = "http.request.cookie."
    hostLabelReplPrefix = "http.request.host.labels."
    pathPartsReplPrefix = "http.request.uri.path."
    varsReplPrefix = "http.var."
    respHeaderReplPrefix = "http.response.header."
)
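The replacer closure above dispatches placeholder keys such as "http.request.header.*" and the new "http.var.*" prefix onto values pulled from the request. A rough stdlib sketch of the same prefix-dispatch pattern follows, with a plain lookup function in place of caddy.Replacer; the function name, its return convention, and the exact prefix strings are assumptions made for the example.

package main

import (
    "fmt"
    "net/http"
    "net/textproto"
    "strings"
)

// lookup resolves a couple of placeholder-style keys against a request,
// mirroring the prefix checks in addHTTPVarsToReplacer (illustrative only).
func lookup(req *http.Request, key string) (string, bool) {
    const (
        headerPrefix = "http.request.header."
        queryPrefix  = "http.request.uri.query."
    )
    switch {
    case strings.HasPrefix(key, headerPrefix):
        field := key[len(headerPrefix):]
        vals := req.Header[textproto.CanonicalMIMEHeaderKey(field)]
        return strings.Join(vals, ","), true
    case strings.HasPrefix(key, queryPrefix):
        vals := req.URL.Query()[key[len(queryPrefix):]]
        return strings.Join(vals, ","), true
    }
    return "", false
}

func main() {
    req, _ := http.NewRequest("GET", "http://localhost/?page=2", nil)
    req.Header.Set("X-Forwarded-For", "203.0.113.9")

    v, _ := lookup(req, "http.request.header.x-forwarded-for")
    fmt.Println(v) // 203.0.113.9
    v, _ = lookup(req, "http.request.uri.query.page")
    fmt.Println(v) // 2
}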
@ -15,6 +15,8 @@
package reverseproxy

import (
    "github.com/caddyserver/caddy/caddyconfig/caddyfile"
    "github.com/caddyserver/caddy/caddyconfig/httpcaddyfile"
    "github.com/caddyserver/caddy/v2"
)

@ -25,3 +27,27 @@ func init() {
        New: func() interface{} { return new(LoadBalanced) },
    })
}

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//     proxy [<matcher>] <to>
//
// TODO: This needs to be finished. It definitely needs to be able to open a block...
func (lb *LoadBalanced) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
    for d.Next() {
        allTo := d.RemainingArgs()
        if len(allTo) == 0 {
            return d.ArgErr()
        }
        for _, to := range allTo {
            lb.Upstreams = append(lb.Upstreams, &UpstreamConfig{Host: to})
        }
    }
    return nil
}

// Bucket returns the HTTP Caddyfile handler bucket number.
func (*LoadBalanced) Bucket() int { return 7 }

// Interface guard
var _ httpcaddyfile.HandlerDirective = (*LoadBalanced)(nil)
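LoadBalanced keeps its selection strategy in LoadBalanceType ("random" or "round_robin"). As a reference point only, here is a self-contained sketch of those two selection policies over a plain slice of hosts; it is not the package's implementation, and the selector type below is made up for the example.

package main

import (
    "fmt"
    "math/rand"
    "sync/atomic"
)

// selector picks the next upstream host; both policies operate on a fixed
// list of healthy hosts (health checking is out of scope for this sketch).
type selector struct {
    hosts []string
    next  uint64
}

// roundRobin cycles through the hosts in order.
func (s *selector) roundRobin() string {
    n := atomic.AddUint64(&s.next, 1)
    return s.hosts[(n-1)%uint64(len(s.hosts))]
}

// random picks a host uniformly at random.
func (s *selector) random() string {
    return s.hosts[rand.Intn(len(s.hosts))]
}

func main() {
    s := &selector{hosts: []string{"10.0.0.1:8080", "10.0.0.2:8080", "10.0.0.3:8080"}}
    for i := 0; i < 4; i++ {
        fmt.Println("round_robin:", s.roundRobin())
    }
    fmt.Println("random:", s.random())
}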
@ -179,21 +179,21 @@ type LoadBalanced struct {

    // The following struct fields are set by caddy configuration.
    // TryInterval is the max duration for which request retries will be performed for a request.
    TryInterval string `json:"try_interval"`
    TryInterval string `json:"try_interval,omitempty"`

    // Upstreams are the configs for upstream hosts
    Upstreams []*UpstreamConfig `json:"upstreams"`
    Upstreams []*UpstreamConfig `json:"upstreams,omitempty"`

    // LoadBalanceType is the string representation of what loadbalancing algorithm to use. i.e. "random" or "round_robin".
    LoadBalanceType string `json:"load_balance_type"`
    LoadBalanceType string `json:"load_balance_type,omitempty"`

    // NoHealthyUpstreamsMessage is returned as a response when there are no healthy upstreams to loadbalance to.
    NoHealthyUpstreamsMessage string `json:"no_healthy_upstreams_message"`
    NoHealthyUpstreamsMessage string `json:"no_healthy_upstreams_message,omitempty"`

    // TODO :- store healthcheckers as package level state where each upstream gets a single healthchecker
    // currently a healthchecker is created for each upstream defined, even if a healthchecker was previously created
    // for that upstream
    HealthCheckers []*HealthChecker
    HealthCheckers []*HealthChecker `json:"health_checkers,omitempty"`
}

// Cleanup stops all health checkers on a loadbalanced reverse proxy.

@ -320,22 +320,22 @@ func (lb *LoadBalanced) random() *upstream {
// UpstreamConfig represents the config of an upstream.
type UpstreamConfig struct {
    // Host is the host name of the upstream server.
    Host string `json:"host"`
    Host string `json:"host,omitempty"`

    // FastHealthCheckDuration is the duration for which a health check is performed when a node is considered unhealthy.
    FastHealthCheckDuration string `json:"fast_health_check_duration"`
    FastHealthCheckDuration string `json:"fast_health_check_duration,omitempty"`

    CircuitBreaker json.RawMessage `json:"circuit_breaker"`
    CircuitBreaker json.RawMessage `json:"circuit_breaker,omitempty"`

    // // CircuitBreakerConfig is the config passed to setup a circuit breaker.
    // CircuitBreakerConfig *circuitbreaker.Config `json:"circuit_breaker"`
    // CircuitBreakerConfig *circuitbreaker.Config `json:"circuit_breaker,omitempty"`
    circuitbreaker CircuitBreaker

    // HealthCheckDuration is the default duration for which a health check is performed.
    HealthCheckDuration string `json:"health_check_duration"`
    HealthCheckDuration string `json:"health_check_duration,omitempty"`

    // HealthCheckPath is the path at the upstream host to use for healthchecks.
    HealthCheckPath string `json:"health_check_path"`
    HealthCheckPath string `json:"health_check_path,omitempty"`
}

// upstream represents an upstream host.
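The only change to these structs is the addition of ,omitempty to the JSON tags, which keeps zero-value fields out of marshaled config output. A quick stdlib demonstration of the effect; the Upstream type here is a stand-in for illustration, not the package's UpstreamConfig.

package main

import (
    "encoding/json"
    "fmt"
)

// Upstream mimics a config struct with and without omitempty.
type Upstream struct {
    Host            string `json:"host,omitempty"`
    HealthCheckPath string `json:"health_check_path,omitempty"`
    TryInterval     string `json:"try_interval"` // no omitempty: always emitted
}

func main() {
    out, _ := json.Marshal(Upstream{Host: "10.0.0.1:8080"})
    fmt.Println(string(out))
    // {"host":"10.0.0.1:8080","try_interval":""}
    // health_check_path is dropped entirely, while try_interval still
    // appears as "" because its tag lacks the omitempty option.
}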
38
modules/caddyhttp/rewrite/caddyfile.go
Normal file

@ -0,0 +1,38 @@
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rewrite

import (
    "github.com/caddyserver/caddy/caddyconfig/caddyfile"
    "github.com/caddyserver/caddy/caddyconfig/httpcaddyfile"
)

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//     rewrite [<matcher>] <to>
//
// The <to> parameter becomes the new URI.
func (rewr *Rewrite) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
    for d.Next() {
        rewr.URI = d.Val()
    }
    return nil
}

// Bucket returns the HTTP Caddyfile handler bucket number.
func (rewr Rewrite) Bucket() int { return 1 }

// Interface guard
var _ httpcaddyfile.HandlerDirective = (*Rewrite)(nil)
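The adapter only stores <to> as rewr.URI; at request time the rewrite handler swaps that value into the request before later handlers run. Below is a stdlib-only sketch of a single-URI rewrite on an http.Request, purely to illustrate the concept; it is not the rewrite module's code, which also expands placeholders.

package main

import (
    "fmt"
    "net/http"
    "net/url"
)

// rewriteURI replaces the request's path and query with the given URI,
// roughly what a `rewrite [<matcher>] <to>` directive asks for.
func rewriteURI(r *http.Request, to string) error {
    u, err := url.Parse(to)
    if err != nil {
        return err
    }
    r.URL.Path = u.Path
    r.URL.RawQuery = u.RawQuery
    return nil
}

func main() {
    req, _ := http.NewRequest("GET", "http://example.com/old?x=1", nil)
    _ = rewriteURI(req, "/index.html?from=old")
    fmt.Println(req.URL.String()) // http://example.com/index.html?from=old
}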
@ -22,10 +22,10 @@ import (
    "github.com/caddyserver/caddy/v2"
)

// ServerRoute represents a set of matching rules,
// Route represents a set of matching rules,
// middlewares, and a responder for handling HTTP
// requests.
type ServerRoute struct {
type Route struct {
    Group string `json:"group,omitempty"`
    MatcherSets []map[string]json.RawMessage `json:"match,omitempty"`
    Handle []json.RawMessage `json:"handle,omitempty"`

@ -37,22 +37,22 @@ type ServerRoute struct {
}

// Empty returns true if the route has all zero/default values.
func (sr ServerRoute) Empty() bool {
    return len(sr.MatcherSets) == 0 &&
        len(sr.Handle) == 0 &&
        len(sr.handlers) == 0 &&
        !sr.Terminal &&
        sr.Group == ""
func (r Route) Empty() bool {
    return len(r.MatcherSets) == 0 &&
        len(r.Handle) == 0 &&
        len(r.handlers) == 0 &&
        !r.Terminal &&
        r.Group == ""
}

func (sr ServerRoute) anyMatcherSetMatches(r *http.Request) bool {
    for _, ms := range sr.matcherSets {
        if ms.Match(r) {
func (r Route) anyMatcherSetMatches(req *http.Request) bool {
    for _, ms := range r.matcherSets {
        if ms.Match(req) {
            return true
        }
    }
    // if no matchers, always match
    return len(sr.matcherSets) == 0
    return len(r.matcherSets) == 0
}

// MatcherSet is a set of matchers which

@ -73,7 +73,7 @@ func (mset MatcherSet) Match(r *http.Request) bool {
// RouteList is a list of server routes that can
// create a middleware chain.
type RouteList []ServerRoute
type RouteList []Route

// Provision sets up all the routes by loading the modules.
func (routes RouteList) Provision(ctx caddy.Context) error {
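anyMatcherSetMatches encodes the routing rule that matcher sets are OR'ed together while the matchers inside one set must all match, and that a route with no matcher sets matches every request. A toy, self-contained sketch of that logic follows; the matcher interface and concrete matchers here are local to the example, not the caddyhttp types.

package main

import (
    "fmt"
    "net/http"
    "strings"
)

// matcher is a stand-in for the package's RequestMatcher interface.
type matcher interface{ Match(*http.Request) bool }

type pathPrefix string

func (p pathPrefix) Match(r *http.Request) bool { return strings.HasPrefix(r.URL.Path, string(p)) }

type methodIs string

func (m methodIs) Match(r *http.Request) bool { return r.Method == string(m) }

// matcherSet: every matcher in the set must match (AND).
type matcherSet []matcher

func (ms matcherSet) match(r *http.Request) bool {
    for _, m := range ms {
        if !m.Match(r) {
            return false
        }
    }
    return true
}

// anyMatcherSetMatches: any one set may match (OR); no sets means match all.
func anyMatcherSetMatches(sets []matcherSet, r *http.Request) bool {
    for _, ms := range sets {
        if ms.match(r) {
            return true
        }
    }
    return len(sets) == 0
}

func main() {
    sets := []matcherSet{
        {pathPrefix("/api/"), methodIs("GET")},
        {pathPrefix("/healthz")},
    }
    req, _ := http.NewRequest("GET", "http://localhost/api/users", nil)
    fmt.Println(anyMatcherSetMatches(sets, req)) // true
}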
@ -57,7 +57,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    repl := caddy.NewReplacer()
    ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl)
    ctx = context.WithValue(ctx, ServerCtxKey, s)
    ctx = context.WithValue(ctx, TableCtxKey, make(map[string]interface{})) // TODO: Implement this
    ctx = context.WithValue(ctx, VarCtxKey, make(map[string]interface{}))
    r = r.WithContext(ctx)

    // once the pointer to the request won't change

@ -201,6 +201,14 @@ type AutoHTTPSConfig struct {
    // that certificates will not be provisioned and managed
    // for these names.
    SkipCerts []string `json:"skip_certificates,omitempty"`

    // By default, automatic HTTPS will obtain and renew
    // certificates for qualifying hostnames. However, if
    // a certificate with a matching SAN is already loaded
    // into the cache, certificate management will not be
    // enabled. To force automated certificate management
    // regardless of loaded certificates, set this to true.
    IgnoreLoadedCerts bool `json:"ignore_loaded_certificates,omitempty"`
}

// Skipped returns true if name is in skipSlice, which

@ -225,6 +233,6 @@ const (
    // For referencing the server instance
    ServerCtxKey caddy.CtxKey = "server"

    // For the request's variable table (TODO: implement this)
    TableCtxKey caddy.CtxKey = "table"
    // For the request's variable table
    VarCtxKey caddy.CtxKey = "vars"
)
@ -18,7 +18,6 @@ import (
    "fmt"
    "net/http"
    "strconv"
    "strings"

    "github.com/caddyserver/caddy/v2"
)

@ -33,7 +32,7 @@ func init() {
// StaticError implements a simple handler that returns an error.
type StaticError struct {
    Error string `json:"error,omitempty"`
    StatusCode weakString `json:"status_code,omitempty"`
    StatusCode WeakString `json:"status_code,omitempty"`
}

func (e StaticError) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Handler) error {

@ -53,43 +52,3 @@ func (e StaticError) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Handler

// Interface guard
var _ MiddlewareHandler = (*StaticError)(nil)

// weakString is a type that unmarshals any JSON value
// as a string literal, and provides methods for
// getting the value as different primitive types.
// However, using this type removes any type safety
// as far as deserializing JSON is concerned.
type weakString string

// UnmarshalJSON satisfies json.Unmarshaler. It
// unmarshals b by always interpreting it as a
// string literal.
func (ws *weakString) UnmarshalJSON(b []byte) error {
    *ws = weakString(strings.Trim(string(b), `"`))
    return nil
}

// Int returns ws as an integer. If ws is not an
// integer, 0 is returned.
func (ws weakString) Int() int {
    num, _ := strconv.Atoi(string(ws))
    return num
}

// Float64 returns ws as a float64. If ws is not a
// float value, the zero value is returned.
func (ws weakString) Float64() float64 {
    num, _ := strconv.ParseFloat(string(ws), 64)
    return num
}

// Bool returns ws as a boolean. If ws is not a
// boolean, false is returned.
func (ws weakString) Bool() bool {
    return string(ws) == "true"
}

// String returns ws as a string.
func (ws weakString) String() string {
    return string(ws)
}
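The unexported weakString defined here is removed in favor of an exported WeakString used by StaticError and StaticResponse. The type matters because Caddyfile arguments are always strings while hand-written JSON may use numbers; it accepts either form. A small standalone sketch of that behavior, re-declaring the same UnmarshalJSON locally so it runs on its own:

package main

import (
    "encoding/json"
    "fmt"
    "strconv"
    "strings"
)

// weakString mirrors the type shown above: any JSON value is kept as its
// literal text, so both 404 and "404" decode to the same thing.
type weakString string

func (ws *weakString) UnmarshalJSON(b []byte) error {
    *ws = weakString(strings.Trim(string(b), `"`))
    return nil
}

func (ws weakString) Int() int {
    n, _ := strconv.Atoi(string(ws))
    return n
}

type resp struct {
    StatusCode weakString `json:"status_code"`
}

func main() {
    var a, b resp
    _ = json.Unmarshal([]byte(`{"status_code": 404}`), &a)
    _ = json.Unmarshal([]byte(`{"status_code": "404"}`), &b)
    fmt.Println(a.StatusCode.Int(), b.StatusCode.Int()) // 404 404
}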
@ -20,6 +20,7 @@ import (
    "strconv"

    "github.com/caddyserver/caddy/v2"
    "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {

@ -31,12 +32,48 @@ func init() {

// StaticResponse implements a simple responder for static responses.
type StaticResponse struct {
    StatusCode weakString `json:"status_code"`
    Headers http.Header `json:"headers"`
    Body string `json:"body"`
    Close bool `json:"close"`
    StatusCode WeakString `json:"status_code,omitempty"`
    Headers http.Header `json:"headers,omitempty"`
    Body string `json:"body,omitempty"`
    Close bool `json:"close,omitempty"`
}

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//     static_response [<matcher>] <status> {
//         body <text>
//         close
//     }
//
func (s *StaticResponse) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
    for d.Next() {
        var statusCodeStr string
        if d.Args(&statusCodeStr) {
            s.StatusCode = WeakString(statusCodeStr)
        }
        for d.NextBlock() {
            switch d.Val() {
            case "body":
                if s.Body != "" {
                    return d.Err("body already specified")
                }
                if !d.Args(&s.Body) {
                    return d.ArgErr()
                }
            case "close":
                if s.Close {
                    return d.Err("close already specified")
                }
                s.Close = true
            }
        }
    }
    return nil
}

// Bucket returns the HTTP Caddyfile handler bucket number.
func (StaticResponse) Bucket() int { return 7 }

func (s StaticResponse) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Handler) error {
    repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)


@ -30,7 +30,7 @@ func TestStaticResponseHandler(t *testing.T) {
    w := httptest.NewRecorder()

    s := StaticResponse{
        StatusCode: weakString(strconv.Itoa(http.StatusNotFound)),
        StatusCode: WeakString(strconv.Itoa(http.StatusNotFound)),
        Headers: http.Header{
            "X-Test": []string{"Testing"},
        },
@ -1,55 +0,0 @@
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
    "net/http"

    "github.com/caddyserver/caddy/v2"
)

func init() {
    caddy.RegisterModule(caddy.Module{
        Name: "http.handlers.table",
        New: func() interface{} { return new(tableMiddleware) },
    })

    caddy.RegisterModule(caddy.Module{
        Name: "http.matchers.table",
        New: func() interface{} { return new(tableMatcher) },
    })
}

type tableMiddleware struct {
}

func (t tableMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
    // tbl := r.Context().Value(TableCtxKey).(map[string]interface{})

    // TODO: implement this...

    return nil
}

type tableMatcher struct {
}

func (m tableMatcher) Match(r *http.Request) bool {
    return false // TODO: implement
}

// Interface guards
var _ MiddlewareHandler = (*tableMiddleware)(nil)
var _ RequestMatcher = (*tableMatcher)(nil)
63
modules/caddyhttp/templates/caddyfile.go
Normal file

@ -0,0 +1,63 @@
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package templates

import (
    "github.com/caddyserver/caddy/caddyconfig/caddyfile"
    "github.com/caddyserver/caddy/caddyconfig/httpcaddyfile"
)

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//     templates [<matcher>] {
//         mime <types...>
//         between <open_delim> <close_delim>
//         root <path>
//     }
//
func (t *Templates) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
    for d.Next() {
        for d.NextBlock() {
            switch d.Val() {
            case "mime":
                t.MIMETypes = d.RemainingArgs()
                if len(t.MIMETypes) == 0 {
                    return d.ArgErr()
                }
            case "between":
                t.Delimiters = d.RemainingArgs()
                if len(t.Delimiters) != 2 {
                    return d.ArgErr()
                }
            case "root":
                if !d.Args(&t.IncludeRoot) {
                    return d.ArgErr()
                }
            }
        }
    }

    if t.IncludeRoot == "" {
        t.IncludeRoot = "{http.var.root}"
    }

    return nil
}

// Bucket returns the HTTP Caddyfile handler bucket number.
func (t Templates) Bucket() int { return 5 }

// Interface guard
var _ httpcaddyfile.HandlerDirective = (*Templates)(nil)
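The between subdirective fills t.Delimiters, which ultimately changes the delimiters used when the response body is executed as a template. The stdlib equivalent of that knob looks like the sketch below; the delimiters and the data passed in are arbitrary for the example, and the real templates module wires in its own context and functions.

package main

import (
    "os"
    "text/template"
)

func main() {
    // "between [[ ]]" in the Caddyfile corresponds to custom delimiters,
    // which text/template supports directly via Delims.
    tpl := template.Must(template.New("page").
        Delims("[[", "]]").
        Parse("Hello, [[.Name]]!\n"))

    _ = tpl.Execute(os.Stdout, struct{ Name string }{Name: "world"})
}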
@ -108,7 +108,8 @@ func (t *Templates) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddy
func (t *Templates) executeTemplate(rr caddyhttp.ResponseRecorder, r *http.Request) error {
    var fs http.FileSystem
    if t.IncludeRoot != "" {
        fs = http.Dir(t.IncludeRoot)
        repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)
        fs = http.Dir(repl.ReplaceAll(t.IncludeRoot, "."))
    }

    ctx := &templateContext{

@ -136,19 +136,6 @@ func (c templateContext) Cookie(name string) string {
    return ""
}

// Hostname gets the (remote) hostname of the client making the request.
// Performance warning: This involves a DNS lookup.
func (c templateContext) Hostname() string {
    ip := c.RemoteIP()

    hostnameList, err := net.LookupAddr(ip)
    if err != nil || len(hostnameList) == 0 {
        return c.Req.RemoteAddr
    }

    return hostnameList[0]
}

// RemoteIP gets the IP address of the client making the request.
func (c templateContext) RemoteIP() string {
    ip, _, err := net.SplitHostPort(c.Req.RemoteAddr)
71
modules/caddyhttp/vars.go
Normal file

@ -0,0 +1,71 @@
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
    "net/http"

    "github.com/caddyserver/caddy/v2"
)

func init() {
    caddy.RegisterModule(caddy.Module{
        Name: "http.handlers.vars",
        New: func() interface{} { return new(VarsMiddleware) },
    })
    caddy.RegisterModule(caddy.Module{
        Name: "http.matchers.vars",
        New: func() interface{} { return new(VarsMiddleware) },
    })
}

// VarsMiddleware is an HTTP middleware which sets variables
// in the context, mainly for use by placeholders.
type VarsMiddleware map[string]string

func (t VarsMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
    vars := r.Context().Value(VarCtxKey).(map[string]interface{})
    repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)
    for k, v := range t {
        keyExpanded := repl.ReplaceAll(k, "")
        valExpanded := repl.ReplaceAll(v, "")
        vars[keyExpanded] = valExpanded
    }
    return next.ServeHTTP(w, r)
}

// VarsMatcher is an HTTP request matcher which can match
// requests based on variables in the context.
type VarsMatcher map[string]string

// Match matches a request based on variables in the context.
func (m VarsMatcher) Match(r *http.Request) bool {
    vars := r.Context().Value(VarCtxKey).(map[string]string)
    repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)
    for k, v := range m {
        keyExpanded := repl.ReplaceAll(k, "")
        valExpanded := repl.ReplaceAll(v, "")
        if vars[keyExpanded] != valExpanded {
            return false
        }
    }
    return true
}

// Interface guards
var (
    _ MiddlewareHandler = (*VarsMiddleware)(nil)
    _ RequestMatcher = (*VarsMatcher)(nil)
)
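VarsMiddleware writes into the per-request table that the server seeds under VarCtxKey, and placeholders later read it back through the "http.var." prefix. A stdlib-only sketch of that pattern follows, with a local context key and helpers standing in for the caddyhttp types; the names here are invented for the example.

package main

import (
    "context"
    "fmt"
    "net/http"
    "net/http/httptest"
)

type ctxKey string

const varsKey ctxKey = "vars" // stand-in for caddyhttp.VarCtxKey

// withVars seeds an empty variable table, much as Server.ServeHTTP does.
func withVars(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        ctx := context.WithValue(r.Context(), varsKey, map[string]interface{}{})
        next.ServeHTTP(w, r.WithContext(ctx))
    })
}

// setVar mimics VarsMiddleware: mutate the shared map stored in the context.
func setVar(r *http.Request, k string, v interface{}) {
    r.Context().Value(varsKey).(map[string]interface{})[k] = v
}

func main() {
    h := withVars(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        setVar(r, "root", "/srv/www")
        vars := r.Context().Value(varsKey).(map[string]interface{})
        fmt.Fprintf(w, "root=%v\n", vars["root"])
    }))

    rec := httptest.NewRecorder()
    h.ServeHTTP(rec, httptest.NewRequest("GET", "/", nil))
    fmt.Print(rec.Body.String()) // root=/srv/www
}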
@ -172,7 +172,7 @@ func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error {
    // add all the cipher suites in order, without duplicates
    cipherSuitesAdded := make(map[uint16]struct{})
    for _, csName := range p.CipherSuites {
        csID := supportedCipherSuites[csName]
        csID := SupportedCipherSuites[csName]
        if _, ok := cipherSuitesAdded[csID]; !ok {
            cipherSuitesAdded[csID] = struct{}{}
            cfg.CipherSuites = append(cfg.CipherSuites, csID)

@ -182,7 +182,7 @@ func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error {
    // add all the curve preferences in order, without duplicates
    curvesAdded := make(map[tls.CurveID]struct{})
    for _, curveName := range p.Curves {
        curveID := supportedCurves[curveName]
        curveID := SupportedCurves[curveName]
        if _, ok := curvesAdded[curveID]; !ok {
            curvesAdded[curveID] = struct{}{}
            cfg.CurvePreferences = append(cfg.CurvePreferences, curveID)

@ -203,10 +203,10 @@ func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error {

    // min and max protocol versions
    if p.ProtocolMin != "" {
        cfg.MinVersion = supportedProtocols[p.ProtocolMin]
        cfg.MinVersion = SupportedProtocols[p.ProtocolMin]
    }
    if p.ProtocolMax != "" {
        cfg.MaxVersion = supportedProtocols[p.ProtocolMax]
        cfg.MaxVersion = SupportedProtocols[p.ProtocolMax]
    }
    if p.ProtocolMin > p.ProtocolMax {
        return fmt.Errorf("protocol min (%x) cannot be greater than protocol max (%x)", p.ProtocolMin, p.ProtocolMax)
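buildStandardTLSConfig translates the policy's string names into crypto/tls constants via the now-exported SupportedProtocols map (defined further down in values.go). A condensed, standalone sketch of that translation step; the local map literal simply repeats the three values visible in this diff.

package main

import (
    "crypto/tls"
    "fmt"
)

// protocols mirrors the idea of caddytls.SupportedProtocols for this example.
var protocols = map[string]uint16{
    "tls1.0": tls.VersionTLS10,
    "tls1.1": tls.VersionTLS11,
    "tls1.2": tls.VersionTLS12,
}

func main() {
    cfg := &tls.Config{}
    if minVer := "tls1.2"; minVer != "" {
        cfg.MinVersion = protocols[minVer]
    }
    fmt.Printf("MinVersion: %#x\n", cfg.MinVersion) // 0x303
}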
@ -25,12 +25,12 @@ import (
func init() {
    caddy.RegisterModule(caddy.Module{
        Name: "tls.certificates.load_files",
        New: func() interface{} { return fileLoader{} },
        New: func() interface{} { return FileLoader{} },
    })
}

// fileLoader loads certificates and their associated keys from disk.
type fileLoader []CertKeyFilePair
// FileLoader loads certificates and their associated keys from disk.
type FileLoader []CertKeyFilePair

// CertKeyFilePair pairs certificate and key file names along with their
// encoding format so that they can be loaded from disk.

@ -42,7 +42,7 @@ type CertKeyFilePair struct {
}

// LoadCertificates returns the certificates to be loaded by fl.
func (fl fileLoader) LoadCertificates() ([]Certificate, error) {
func (fl FileLoader) LoadCertificates() ([]Certificate, error) {
    var certs []Certificate
    for _, pair := range fl {
        certData, err := ioutil.ReadFile(pair.Certificate)

@ -73,4 +73,4 @@ func (fl fileLoader) LoadCertificates() ([]Certificate, error) {
}

// Interface guard
var _ CertificateLoader = (fileLoader)(nil)
var _ CertificateLoader = (FileLoader)(nil)
@ -30,20 +30,20 @@ import (
func init() {
    caddy.RegisterModule(caddy.Module{
        Name: "tls.certificates.load_folders",
        New: func() interface{} { return folderLoader{} },
        New: func() interface{} { return FolderLoader{} },
    })
}

// folderLoader loads certificates and their associated keys from disk
// FolderLoader loads certificates and their associated keys from disk
// by recursively walking the specified directories, looking for PEM
// files which contain both a certificate and a key.
type folderLoader []string
type FolderLoader []string

// LoadCertificates loads all the certificates+keys in the directories
// listed in fl from all files ending with .pem. This method of loading
// certificates expects the certificate and key to be bundled into the
// same file.
func (fl folderLoader) LoadCertificates() ([]Certificate, error) {
func (fl FolderLoader) LoadCertificates() ([]Certificate, error) {
    var certs []Certificate
    for _, dir := range fl {
        err := filepath.Walk(dir, func(fpath string, info os.FileInfo, err error) error {

@ -135,4 +135,4 @@ func x509CertFromCertAndKeyPEMFile(fpath string) (tls.Certificate, error) {
    return cert, nil
}

var _ CertificateLoader = (folderLoader)(nil)
var _ CertificateLoader = (FolderLoader)(nil)
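FolderLoader expects each .pem file to bundle the certificate chain and its private key in one file, which x509CertFromCertAndKeyPEMFile then splits apart. A simplified standalone sketch of that splitting, using only encoding/pem and crypto/tls; error handling is trimmed, the file name is hypothetical, and the real function checks block types more carefully.

package main

import (
    "crypto/tls"
    "encoding/pem"
    "fmt"
    "os"
    "strings"
)

// splitBundle separates CERTIFICATE blocks from the PRIVATE KEY block in a
// combined PEM file, then pairs them with tls.X509KeyPair.
func splitBundle(path string) (tls.Certificate, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return tls.Certificate{}, err
    }
    var certPEM, keyPEM []byte
    for {
        block, rest := pem.Decode(data)
        if block == nil {
            break
        }
        if strings.Contains(block.Type, "PRIVATE KEY") {
            keyPEM = append(keyPEM, pem.EncodeToMemory(block)...)
        } else if block.Type == "CERTIFICATE" {
            certPEM = append(certPEM, pem.EncodeToMemory(block)...)
        }
        data = rest
    }
    return tls.X509KeyPair(certPEM, keyPEM)
}

func main() {
    cert, err := splitBundle("site.pem") // assumes a combined cert+key bundle exists
    if err != nil {
        fmt.Println("load failed:", err)
        return
    }
    fmt.Println("certificates in chain:", len(cert.Certificate))
}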
@ -29,7 +29,7 @@ import (
// SessionTicketService configures and manages TLS session tickets.
type SessionTicketService struct {
    KeySource json.RawMessage `json:"key_source,omitempty"`
    RotationInterval caddy.Duration `json:"rotation_interval,omitempty"`
    RotationInterval caddy.Duration `json:"rotation_interval,omitempty"`
    MaxKeys int `json:"max_keys,omitempty"`
    DisableRotation bool `json:"disable_rotation,omitempty"`
    Disabled bool `json:"disabled,omitempty"`

@ -45,8 +45,8 @@ func init() {
// TLS represents a process-wide TLS configuration.
type TLS struct {
    Certificates map[string]json.RawMessage `json:"certificates,omitempty"`
    Automation AutomationConfig `json:"automation,omitempty"`
    SessionTickets SessionTicketService `json:"session_tickets,omitempty"`
    Automation AutomationConfig `json:"automation"`
    SessionTickets SessionTicketService `json:"session_tickets"`

    certificateLoaders []CertificateLoader
    certCache *certmagic.Cache

@ -105,16 +105,12 @@ func (t *TLS) Provision(ctx caddy.Context) error {
        onDemandRateLimiter.SetLimit(0)
    }

    return nil
}

// Start activates the TLS module.
func (t *TLS) Start() error {
    // load manual/static (unmanaged) certificates - we do this in
    // provision so that other apps (such as http) can know which
    // certificates have been manually loaded
    magic := certmagic.New(t.certCache, certmagic.Config{
        Storage: t.ctx.Storage(),
        Storage: ctx.Storage(),
    })

    // load manual/static (unmanaged) certificates
    for _, loader := range t.certificateLoaders {
        certs, err := loader.LoadCertificates()
        if err != nil {

@ -128,6 +124,11 @@ func (t *TLS) Start() error {
        }
    }

    return nil
}

// Start activates the TLS module.
func (t *TLS) Start() error {
    // load automated (managed) certificates
    if automatedRawMsg, ok := t.Certificates[automateKey]; ok {
        var names []string

@ -204,6 +205,12 @@ func (t *TLS) getAutomationPolicyForName(name string) AutomationPolicy {
    return AutomationPolicy{Management: mgmt}
}

// CertificatesWithSAN returns the list of all certificates
// in the cache that match the given SAN value.
func (t *TLS) CertificatesWithSAN(san string) []certmagic.Certificate {
    return t.certCache.CertificatesWithSAN(san)
}

// CertificateLoader is a type that can load certificates.
// Certificates can optionally be associated with tags.
type CertificateLoader interface {
@ -22,12 +22,16 @@ import (
    "github.com/klauspost/cpuid"
)

// supportedCipherSuites is the unordered map of cipher suite
// SupportedCipherSuites is the unordered map of cipher suite
// string names to their definition in crypto/tls. All values
// should be IANA-reserved names. See
// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml
// Two of the cipher suite constants in the standard lib do not use the
// full IANA name, but we do; see:
// https://github.com/golang/go/issues/32061 and
// https://github.com/golang/go/issues/30325#issuecomment-512862374.
// TODO: might not be needed much longer: https://github.com/golang/go/issues/30325
var supportedCipherSuites = map[string]uint16{
var SupportedCipherSuites = map[string]uint16{
    "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
    "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
    "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,

@ -84,22 +88,24 @@ func getOptimalDefaultCipherSuites() []uint16 {
    return defaultCipherSuitesWithoutAESNI
}

// supportedCurves is the unordered map of supported curves.
// SupportedCurves is the unordered map of supported curves.
// https://golang.org/pkg/crypto/tls/#CurveID
var supportedCurves = map[string]tls.CurveID{
    "X25519": tls.X25519,
    "P256": tls.CurveP256,
    "P384": tls.CurveP384,
    "P521": tls.CurveP521,
var SupportedCurves = map[string]tls.CurveID{
    // TODO: Use IANA names, probably? see https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8
    // All named crypto/elliptic curves have secpXXXr1 IANA names.
    "x25519": tls.X25519, // x25519, 29
    "p256": tls.CurveP256, // secp256r1, 23
    "p384": tls.CurveP384, // secp384r1, 24
    "p521": tls.CurveP521, // secp521r1, 25
}

// supportedCertKeyTypes is all the key types that are supported
// for certificates that are obtained through ACME.
var supportedCertKeyTypes = map[string]certcrypto.KeyType{
    "RSA2048": certcrypto.RSA2048,
    "RSA4096": certcrypto.RSA4096,
    "P256": certcrypto.EC256,
    "P384": certcrypto.EC384,
    "rsa_2048": certcrypto.RSA2048,
    "rsa_4096": certcrypto.RSA4096,
    "ec_p256": certcrypto.EC256,
    "ec_p384": certcrypto.EC384,
}

// defaultCurves is the list of only the curves we want to use

@ -115,9 +121,9 @@ var defaultCurves = []tls.CurveID{
    tls.CurveP256,
}

// supportedProtocols is a map of supported protocols.
// HTTP/2 only supports TLS 1.2 and higher.
var supportedProtocols = map[string]uint16{
// SupportedProtocols is a map of supported protocols.
// Note that HTTP/2 only supports TLS 1.2 and higher.
var SupportedProtocols = map[string]uint16{
    "tls1.0": tls.VersionTLS10,
    "tls1.1": tls.VersionTLS11,
    "tls1.2": tls.VersionTLS12,