 admin.go                                            |   17
 caddy.go                                            |   49
 cmd/caddy/main.go                                   |    1
 cmd/commandfuncs.go                                 |    4
 cmd/commands.go                                     |    4
 cmd/main.go                                         |   13
 cmd/proc_posix.go                                   |    2
 cmd/proc_windows.go                                 |    2
 context.go                                          |    6
 go.mod                                              |    6
 go.sum                                              |   18
 logging.go                                          |  610
 modules/caddyhttp/caddyhttp.go                      |   72
 modules/caddyhttp/fileserver/staticfiles.go         |   19
 modules/caddyhttp/marshalers.go                     |   89
 modules/caddyhttp/replacer.go                       |   32
 modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go   |    9
 modules/caddyhttp/reverseproxy/reverseproxy.go      |    9
 modules/caddyhttp/rewrite/rewrite.go                |   40
 modules/caddyhttp/server.go                         |  207
 modules/caddyhttp/staticresp.go                     |    2
 modules/caddyhttp/subroute.go                       |   25
 modules/caddyhttp/vars.go                           |    4
 modules/caddytls/tls.go                             |    6
 modules/logging/encoders.go                         |  268
 modules/logging/filewriter.go                       |   91
 modules/logging/filterencoder.go                    |  321
 modules/logging/filters.go                          |   94
 modules/logging/nopencoder.go                       |  114
 replacer.go                                         |   67
 replacer_test.go                                    |    4
 sigtrap.go                                          |   14
 sigtrap_posix.go                                    |   12
 usagepool.go                                        |  204
 34 files changed, 2234 insertions(+), 201 deletions(-)
diff --git a/admin.go b/admin.go
index db8b70022..0b4affde4 100644
--- a/admin.go
+++ b/admin.go
@@ -34,6 +34,7 @@ import (
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/mholt/certmagic"
"github.com/rs/cors"
+ "go.uber.org/zap"
)
var (
@@ -52,6 +53,8 @@ var DefaultAdminConfig = &AdminConfig{
Listen: DefaultAdminListen,
}
+// TODO: holy smokes, the admin endpoint might not have to live in caddy's core.
+
// StartAdmin starts Caddy's administration endpoint,
// bootstrapping it with an optional configuration
// in the format of JSON bytes. It opens a listener
@@ -113,7 +116,15 @@ func StartAdmin(initialConfigJSON []byte) error {
}
}
- handler := cors.Default().Handler(mux)
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // TODO: improve/organize this logging
+ Log().Named("admin.request").Info("",
+ zap.String("method", r.Method),
+ zap.String("uri", r.RequestURI),
+ zap.String("remote", r.RemoteAddr),
+ )
+ cors.Default().Handler(mux).ServeHTTP(w, r)
+ })
cfgEndptSrv = &http.Server{
Handler: handler,
@@ -125,14 +136,14 @@ func StartAdmin(initialConfigJSON []byte) error {
go cfgEndptSrv.Serve(ln)
- log.Println("Caddy 2 admin endpoint listening on", adminConfig.Listen)
+ fmt.Println("Caddy 2 admin endpoint listening on", adminConfig.Listen)
if len(initialConfigJSON) > 0 {
err := Load(bytes.NewReader(initialConfigJSON))
if err != nil {
return fmt.Errorf("loading initial config: %v", err)
}
- log.Println("Caddy 2 serving initial configuration")
+ fmt.Println("Caddy 2 serving initial configuration")
}
return nil
diff --git a/caddy.go b/caddy.go
index fb3e488dd..dd241ee4d 100644
--- a/caddy.go
+++ b/caddy.go
@@ -29,16 +29,13 @@ import (
// Config represents a Caddy configuration.
type Config struct {
- Admin *AdminConfig `json:"admin,omitempty"`
+ Admin *AdminConfig `json:"admin,omitempty"`
+ Logging *Logging `json:"logging,omitempty"`
+ StorageRaw json.RawMessage `json:"storage,omitempty"`
+ AppsRaw map[string]json.RawMessage `json:"apps,omitempty"`
- StorageRaw json.RawMessage `json:"storage,omitempty"`
- storage certmagic.Storage
-
- AppsRaw map[string]json.RawMessage `json:"apps,omitempty"`
-
- // apps stores the decoded Apps values,
- // keyed by module name.
- apps map[string]App
+ apps map[string]App
+ storage certmagic.Storage
cancelFunc context.CancelFunc
}
@@ -75,7 +72,11 @@ func Run(newCfg *Config) error {
// is performed if any modules were provisioned;
// apps that were started already will be stopped,
// so this function should not leak resources if
-// an error is returned.
+// an error is returned. However, if no error is
+// returned and start == false, you should cancel
+// the config if you are not going to start it,
+// so that each provisioned module will be
+// cleaned up.
func run(newCfg *Config, start bool) error {
if newCfg == nil {
return nil
@@ -102,12 +103,30 @@ func run(newCfg *Config, start bool) error {
ctx, cancel := NewContext(Context{Context: context.Background(), cfg: newCfg})
defer func() {
if err != nil {
- cancel() // clean up now
+ // if there were any errors during startup,
+ // we should cancel the new context we created
+ // since the associated config won't be used;
+ // this will cause all modules that were newly
+ // provisioned to clean themselves up
+ cancel()
+
+ // also undo any other state changes we made
+ if currentCfg != nil {
+ certmagic.Default.Storage = currentCfg.storage
+ }
}
}()
newCfg.cancelFunc = cancel // clean up later
- // set up storage and make it CertMagic's default storage, too
+ // set up logging before anything bad happens
+ if newCfg.Logging != nil {
+ err := newCfg.Logging.openLogs(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ // set up global storage and make it CertMagic's default storage, too
err = func() error {
if newCfg.StorageRaw != nil {
val, err := ctx.LoadModuleInline("module", "caddy.storage", newCfg.StorageRaw)
@@ -214,7 +233,11 @@ func unsyncedStop(cfg *Config) {
// Validate loads, provisions, and validates
// cfg, but does not start running it.
func Validate(cfg *Config) error {
- return run(cfg, false)
+ err := run(cfg, false)
+ if err == nil {
+ cfg.cancelFunc() // call Cleanup on all modules
+ }
+ return err
}
// Duration is a JSON-string-unmarshable duration type.
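With the new Logging field on Config, a top-level Caddy JSON config can now carry a logging section alongside admin, storage, and apps. A minimal sketch of the shape, using the JSON field names from the struct tags above (the values are placeholders for illustration, not a recommendation):

{
    "logging": {
        "logs": {
            "default": { "level": "debug" }
        }
    },
    "apps": {
        "http": {}
    }
}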
diff --git a/cmd/caddy/main.go b/cmd/caddy/main.go
index 4743cd767..bd29b7bb5 100644
--- a/cmd/caddy/main.go
+++ b/cmd/caddy/main.go
@@ -40,6 +40,7 @@ import (
_ "github.com/caddyserver/caddy/v2/modules/caddytls"
_ "github.com/caddyserver/caddy/v2/modules/caddytls/standardstek"
_ "github.com/caddyserver/caddy/v2/modules/filestorage"
+ _ "github.com/caddyserver/caddy/v2/modules/logging"
)
func main() {
diff --git a/cmd/commandfuncs.go b/cmd/commandfuncs.go
index ec9d9747f..7c08c2ef6 100644
--- a/cmd/commandfuncs.go
+++ b/cmd/commandfuncs.go
@@ -387,7 +387,7 @@ func cmdAdaptConfig(fl Flags) (int, error) {
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
- log.Printf("[WARNING][%s] %s:%d: %s", adaptCmdAdapterFlag, warn.File, warn.Line, msg)
+ fmt.Fprintf(os.Stderr, "[WARNING][%s] %s:%d: %s\n", adaptCmdAdapterFlag, warn.File, warn.Line, msg)
}
// print result to stdout
@@ -436,7 +436,7 @@ func cmdValidateConfig(fl Flags) (int, error) {
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
- log.Printf("[WARNING][%s] %s:%d: %s", validateCmdAdapterFlag, warn.File, warn.Line, msg)
+ fmt.Fprintf(os.Stderr, "[WARNING][%s] %s:%d: %s\n", validateCmdAdapterFlag, warn.File, warn.Line, msg)
}
input = adaptedConfig
diff --git a/cmd/commands.go b/cmd/commands.go
index db9848d88..7f43cd068 100644
--- a/cmd/commands.go
+++ b/cmd/commands.go
@@ -130,9 +130,9 @@ not quit after printing, and can be useful for troubleshooting.`,
RegisterCommand(Command{
Name: "stop",
Func: cmdStop,
- Short: "Gracefully stops the running Caddy process",
+ Short: "Gracefully stops a started Caddy process",
Long: `
-Stops the running Caddy process as gracefully as possible.
+Stops the background Caddy process as gracefully as possible.
On Windows, this stop is forceful and Caddy will not have an opportunity to
clean up any active locks; for a graceful shutdown on Windows, use Ctrl+C
diff --git a/cmd/main.go b/cmd/main.go
index 6447e73be..9997cc401 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -20,7 +20,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"net"
"os"
"strconv"
@@ -38,7 +37,7 @@ func Main() {
switch len(os.Args) {
case 0:
- log.Printf("[FATAL] no arguments provided by OS; args[0] must be command")
+ fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n")
os.Exit(caddy.ExitCodeFailedStartup)
case 1:
os.Args = append(os.Args, "help")
@@ -49,9 +48,9 @@ func Main() {
if !ok {
if strings.HasPrefix(os.Args[1], "-") {
// user probably forgot to type the subcommand
- log.Println("[ERROR] first argument must be a subcommand; see 'caddy help'")
+ fmt.Println("[ERROR] first argument must be a subcommand; see 'caddy help'")
} else {
- log.Printf("[ERROR] '%s' is not a recognized subcommand; see 'caddy help'", os.Args[1])
+ fmt.Printf("[ERROR] '%s' is not a recognized subcommand; see 'caddy help'\n", os.Args[1])
}
os.Exit(caddy.ExitCodeFailedStartup)
}
@@ -63,13 +62,13 @@ func Main() {
err := fs.Parse(os.Args[2:])
if err != nil {
- log.Println(err)
+ fmt.Println(err)
os.Exit(caddy.ExitCodeFailedStartup)
}
exitCode, err := subcommand.Func(Flags{fs})
if err != nil {
- log.Printf("%s: %v", subcommand.Name, err)
+ fmt.Printf("%s: %v\n", subcommand.Name, err)
}
os.Exit(exitCode)
@@ -149,7 +148,7 @@ func loadConfig(configFile, adapterName string) ([]byte, error) {
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
- fmt.Printf("[WARNING][%s] %s:%d: %s", adapterName, warn.File, warn.Line, msg)
+ fmt.Printf("[WARNING][%s] %s:%d: %s\n", adapterName, warn.File, warn.Line, msg)
}
config = adaptedConfig
}
diff --git a/cmd/proc_posix.go b/cmd/proc_posix.go
index 7a53193d9..199c6144b 100644
--- a/cmd/proc_posix.go
+++ b/cmd/proc_posix.go
@@ -24,7 +24,7 @@ import (
)
func gracefullyStopProcess(pid int) error {
- fmt.Printf("Graceful stop...")
+ fmt.Printf("Graceful stop...\n")
err := syscall.Kill(pid, syscall.SIGINT)
if err != nil {
return fmt.Errorf("kill: %v", err)
diff --git a/cmd/proc_windows.go b/cmd/proc_windows.go
index d33394872..dd45234ed 100644
--- a/cmd/proc_windows.go
+++ b/cmd/proc_windows.go
@@ -23,7 +23,7 @@ import (
)
func gracefullyStopProcess(pid int) error {
- fmt.Printf("Forceful Stop...")
+ fmt.Printf("Forceful Stop...\n")
// process on windows will not stop unless forced with /f
cmd := exec.Command("taskkill", "/pid", strconv.Itoa(pid), "/f")
if err := cmd.Run(); err != nil {
diff --git a/context.go b/context.go
index d196c71b6..32368a9a4 100644
--- a/context.go
+++ b/context.go
@@ -22,6 +22,7 @@ import (
"reflect"
"github.com/mholt/certmagic"
+ "go.uber.org/zap"
)
// Context is a type which defines the lifetime of modules that
@@ -206,3 +207,8 @@ func (ctx Context) App(name string) (interface{}, error) {
func (ctx Context) Storage() certmagic.Storage {
return ctx.cfg.storage
}
+
+// Logger returns a logger that can be used by mod.
+func (ctx Context) Logger(mod Module) *zap.Logger {
+ return ctx.cfg.Logging.Logger(mod)
+}
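Modules obtain their own named logger from the provisioning context; the rewrite handler later in this diff uses exactly this pattern. A minimal sketch with a hypothetical module (the package and module name are made up for illustration):

package gizmo

import (
    "github.com/caddyserver/caddy/v2"
    "go.uber.org/zap"
)

// Gizmo is a hypothetical module used only to show the pattern.
type Gizmo struct {
    logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (Gizmo) CaddyModule() caddy.ModuleInfo {
    return caddy.ModuleInfo{
        Name: "foo.gizmo",
        New:  func() caddy.Module { return new(Gizmo) },
    }
}

// Provision stores a logger named after the module, so its entries
// can be routed by the include/exclude lists in logging.go.
func (g *Gizmo) Provision(ctx caddy.Context) error {
    g.logger = ctx.Logger(g)
    g.logger.Debug("gizmo provisioned")
    return nil
}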
diff --git a/go.mod b/go.mod
index 0ab0c8b83..52fa150ef 100644
--- a/go.mod
+++ b/go.mod
@@ -11,18 +11,24 @@ require (
github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc
github.com/ilibs/json5 v1.0.1
github.com/imdario/mergo v0.3.8 // indirect
+ github.com/jsternberg/zap-logfmt v1.2.0
github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19
github.com/klauspost/compress v1.8.6
github.com/klauspost/cpuid v1.2.1
github.com/lucas-clemente/quic-go v0.12.1
github.com/mholt/certmagic v0.8.3
github.com/muhammadmuzzammil1998/jsonc v0.0.0-20190906142622-1265e9b150c6
+ github.com/onsi/ginkgo v1.8.0 // indirect
+ github.com/onsi/gomega v1.5.0 // indirect
github.com/rs/cors v1.7.0
github.com/russross/blackfriday/v2 v2.0.1
github.com/starlight-go/starlight v0.0.0-20181207205707-b06f321544f3
github.com/vulcand/oxy v1.0.0
go.starlark.net v0.0.0-20190919145610-979af19b165c
+ go.uber.org/multierr v1.2.0 // indirect
+ go.uber.org/zap v1.10.0
golang.org/x/crypto v0.0.0-20191010185427-af544f31c8ac
golang.org/x/net v0.0.0-20191009170851-d66e71096ffb
golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0
+ gopkg.in/natefinch/lumberjack.v2 v2.0.0
)
diff --git a/go.sum b/go.sum
index 8023c8d92..ea4ff17db 100644
--- a/go.sum
+++ b/go.sum
@@ -15,6 +15,7 @@ github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocm
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
@@ -134,6 +135,8 @@ github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jsternberg/zap-logfmt v1.2.0 h1:1v+PK4/B48cy8cfQbxL4FmmNZrjnIMr2BsnyEmXqv2o=
+github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19 h1:WjT3fLi9n8YWh/Ih8Q1LHAPsTqGddPcHqscN+PJ3i68=
@@ -201,13 +204,18 @@ github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXW
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/ovh/go-ovh v0.0.0-20181109152953-ba5adb4cf014/go.mod h1:joRatxRJaZBsY3JAOEMcoOp05CnZzsx4scTxi95DHyQ=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -268,8 +276,15 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.starlark.net v0.0.0-20190919145610-979af19b165c h1:WR7X1xgXJlXhQBdorVc9Db3RhwG+J/kp6bLuMyJjfVw=
go.starlark.net v0.0.0-20190919145610-979af19b165c/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg=
+go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4=
+go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/ratelimit v0.0.0-20180316092928-c15da0234277/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -369,6 +384,9 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc h1:GAcf+t0o8gdJAdSFYdE9wChu4bIyguMVqz0RHiFL5VY=
gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc/go.mod h1:VV+3haRsgDiVLxyifmMBrBIuCWFBPYKbRssXB9z67Hw=
gopkg.in/resty.v1 v1.9.1/go.mod h1:vo52Hzryw9PnPHcJfPsBiFW62XhNx5OczbV9y+IMpgc=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
diff --git a/logging.go b/logging.go
new file mode 100644
index 000000000..a70a7ea67
--- /dev/null
+++ b/logging.go
@@ -0,0 +1,610 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddy
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+func init() {
+ RegisterModule(StdoutWriter{})
+ RegisterModule(StderrWriter{})
+ RegisterModule(DiscardWriter{})
+}
+
+// Logging facilitates logging within Caddy.
+type Logging struct {
+ Sink *StandardLibLog `json:"sink,omitempty"`
+ Logs map[string]*CustomLog `json:"logs,omitempty"`
+
+ // a list of all keys for open writers; all writers
+ // that are opened to provision this logging config
+ // must have their keys added to this list so they
+ // can be closed when cleaning up
+ writerKeys []string
+}
+
+// openLogs sets up the config and opens all the configured writers.
+// It closes its logs when ctx is cancelled, so it should clean up
+// after itself.
+func (logging *Logging) openLogs(ctx Context) error {
+ // make sure to deallocate resources when context is done
+ ctx.OnCancel(func() {
+ err := logging.closeLogs()
+ if err != nil {
+ Log().Error("closing logs", zap.Error(err))
+ }
+ })
+
+ // set up the "sink" log first (std lib's default global logger)
+ if logging.Sink != nil {
+ err := logging.Sink.provision(ctx, logging)
+ if err != nil {
+ return fmt.Errorf("setting up sink log: %v", err)
+ }
+ }
+
+ // as a special case, set up the default structured Caddy log next
+ if err := logging.setupNewDefault(ctx); err != nil {
+ return err
+ }
+
+ // then set up any other custom logs
+ for name, l := range logging.Logs {
+ // the default log is already set up
+ if name == "default" {
+ continue
+ }
+
+ err := l.provision(ctx, logging)
+ if err != nil {
+ return fmt.Errorf("setting up custom log '%s': %v", name, err)
+ }
+
+ // Any other logs that use the discard writer can be deleted
+ // entirely. This avoids encoding and processing of each
+ // log entry that would just be thrown away anyway. Notably,
+ // we do not reach this point for the default log, which MUST
+ // exist, otherwise core log emissions would panic because
+ // they use the Log() function directly which expects a non-nil
+ // logger. Even if we keep logs with a discard writer, they
+ // have a nop core, and keeping them at all seems unnecessary.
+ if _, ok := l.writerOpener.(*DiscardWriter); ok {
+ delete(logging.Logs, name)
+ continue
+ }
+ }
+
+ return nil
+}
+
+func (logging *Logging) setupNewDefault(ctx Context) error {
+ if logging.Logs == nil {
+ logging.Logs = make(map[string]*CustomLog)
+ }
+
+ // extract the user-defined default log, if any
+ newDefault := new(defaultCustomLog)
+ if userDefault, ok := logging.Logs["default"]; ok {
+ newDefault.CustomLog = userDefault
+ } else {
+ // if none, make one with our own default settings
+ var err error
+ newDefault, err = newDefaultProductionLog()
+ if err != nil {
+ return fmt.Errorf("setting up default Caddy log: %v", err)
+ }
+ logging.Logs["default"] = newDefault.CustomLog
+ }
+
+ // set up this new log
+ err := newDefault.CustomLog.provision(ctx, logging)
+ if err != nil {
+ return fmt.Errorf("setting up default log: %v", err)
+ }
+ newDefault.logger = zap.New(newDefault.CustomLog.core)
+
+ // redirect the default caddy logs
+ defaultLoggerMu.Lock()
+ oldDefault := defaultLogger
+ defaultLogger = newDefault
+ defaultLoggerMu.Unlock()
+
+ // if the new writer is different, indicate it in the logs for convenience
+ var newDefaultLogWriterKey, currentDefaultLogWriterKey string
+ var newDefaultLogWriterStr, currentDefaultLogWriterStr string
+ if newDefault.writerOpener != nil {
+ newDefaultLogWriterKey = newDefault.writerOpener.WriterKey()
+ newDefaultLogWriterStr = newDefault.writerOpener.String()
+ }
+ if oldDefault.writerOpener != nil {
+ currentDefaultLogWriterKey = oldDefault.writerOpener.WriterKey()
+ currentDefaultLogWriterStr = oldDefault.writerOpener.String()
+ }
+ if newDefaultLogWriterKey != currentDefaultLogWriterKey {
+ oldDefault.logger.Info("redirected default logger",
+ zap.String("from", currentDefaultLogWriterStr),
+ zap.String("to", newDefaultLogWriterStr),
+ )
+ }
+
+ return nil
+}
+
+// closeLogs cleans up resources allocated during openLogs.
+// A successful call to openLogs calls this automatically
+// when the context is cancelled.
+func (logging *Logging) closeLogs() error {
+ for _, key := range logging.writerKeys {
+ _, err := writers.Delete(key)
+ if err != nil {
+ log.Printf("[ERROR] Closing log writer %v: %v", key, err)
+ }
+ }
+ return nil
+}
+
+// Logger returns a logger that is ready for the module to use.
+func (logging *Logging) Logger(mod Module) *zap.Logger {
+ modName := mod.CaddyModule().Name
+ var cores []zapcore.Core
+
+ if logging != nil {
+ for _, l := range logging.Logs {
+ if l.matchesModule(modName) {
+ if len(l.Include) == 0 && len(l.Exclude) == 0 {
+ cores = append(cores, l.core)
+ continue
+ }
+ cores = append(cores, &filteringCore{Core: l.core, cl: l})
+ }
+ }
+ }
+
+ multiCore := zapcore.NewTee(cores...)
+
+ return zap.New(multiCore).Named(modName)
+}
+
+// openWriter opens a writer using opener, and returns true if
+// the writer is new, or false if the writer already exists.
+func (logging *Logging) openWriter(opener WriterOpener) (io.WriteCloser, bool, error) {
+ key := opener.WriterKey()
+ writer, loaded, err := writers.LoadOrNew(key, func() (Destructor, error) {
+ w, err := opener.OpenWriter()
+ return writerDestructor{w}, err
+ })
+ if err == nil {
+ logging.writerKeys = append(logging.writerKeys, key)
+ }
+ return writer.(io.WriteCloser), !loaded, err
+}
+
+// WriterOpener is a module that can open a log writer.
+// It can return a human-readable string representation
+// of itself so that operators can understand where
+// the logs are going.
+type WriterOpener interface {
+ fmt.Stringer
+
+ // WriterKey is a string that uniquely identifies this
+ // writer configuration. It is not shown to humans.
+ WriterKey() string
+
+ // OpenWriter opens a log for writing. The writer
+ // should be safe for concurrent use but need not
+ // be synchronous.
+ OpenWriter() (io.WriteCloser, error)
+}
+
+type writerDestructor struct {
+ io.WriteCloser
+}
+
+func (wdest writerDestructor) Destruct() error {
+ return wdest.Close()
+}
+
+// StandardLibLog configures the default Go standard library
+// global logger in the log package. This is necessary because
+// module dependencies which are not built specifically for
+// Caddy will use the standard logger.
+type StandardLibLog struct {
+ WriterRaw json.RawMessage `json:"writer,omitempty"`
+
+ writer io.WriteCloser
+}
+
+func (sll *StandardLibLog) provision(ctx Context, logging *Logging) error {
+ if sll.WriterRaw != nil {
+ val, err := ctx.LoadModuleInline("output", "caddy.logging.writers", sll.WriterRaw)
+ if err != nil {
+ return fmt.Errorf("loading sink log writer module: %v", err)
+ }
+ wo := val.(WriterOpener)
+ sll.WriterRaw = nil // allow GC to deallocate
+
+ var isNew bool
+ sll.writer, isNew, err = logging.openWriter(wo)
+ if err != nil {
+ return fmt.Errorf("opening sink log writer %#v: %v", val, err)
+ }
+
+ if isNew {
+ log.Printf("[INFO] Redirecting sink to: %s", wo)
+ log.SetOutput(sll.writer)
+ log.Printf("[INFO] Redirected sink to here (%s)", wo)
+ }
+ }
+
+ return nil
+}
+
+// CustomLog represents a custom logger configuration.
+type CustomLog struct {
+ WriterRaw json.RawMessage `json:"writer,omitempty"`
+ EncoderRaw json.RawMessage `json:"encoder,omitempty"`
+ Level string `json:"level,omitempty"`
+ Sampling *LogSampling `json:"sampling,omitempty"`
+ Include []string `json:"include,omitempty"`
+ Exclude []string `json:"exclude,omitempty"`
+
+ writerOpener WriterOpener
+ writer io.WriteCloser
+ encoder zapcore.Encoder
+ levelEnabler zapcore.LevelEnabler
+ core zapcore.Core
+}
+
+func (cl *CustomLog) provision(ctx Context, logging *Logging) error {
+ // set up the log level
+ switch cl.Level {
+ case "debug":
+ cl.levelEnabler = zapcore.DebugLevel
+ case "", "info":
+ cl.levelEnabler = zapcore.InfoLevel
+ case "warn":
+ cl.levelEnabler = zapcore.WarnLevel
+ case "error":
+ cl.levelEnabler = zapcore.ErrorLevel
+ case "panic":
+ cl.levelEnabler = zapcore.PanicLevel
+ case "fatal":
+ cl.levelEnabler = zapcore.FatalLevel
+ default:
+ return fmt.Errorf("unrecognized log level: %s", cl.Level)
+ }
+
+ // If both Include and Exclude lists are populated, then each item must
+ // be a superspace or subspace of an item in the other list, because
+ // populating both lists means that any given item is either a rule
+ // or an exception to another rule. But if the item is not a super-
+ // or sub-space of any item in the other list, it is neither a rule
+ // nor an exception, and is a contradiction. Ensure, too, that the
+ // sets do not intersect, which is also a contradiction.
+ if len(cl.Include) > 0 && len(cl.Exclude) > 0 {
+ // prevent intersections
+ for _, allow := range cl.Include {
+ for _, deny := range cl.Exclude {
+ if allow == deny {
+ return fmt.Errorf("include and exclude must not intersect, but found %s in both lists", allow)
+ }
+ }
+ }
+
+ // ensure namespaces are nested
+ outer:
+ for _, allow := range cl.Include {
+ for _, deny := range cl.Exclude {
+ if strings.HasPrefix(allow+".", deny+".") ||
+ strings.HasPrefix(deny+".", allow+".") {
+ continue outer
+ }
+ }
+ return fmt.Errorf("when both include and exclude are populated, each element must be a superspace or subspace of one in the other list; check '%s' in include", allow)
+ }
+ }
+
+ if cl.EncoderRaw != nil {
+ val, err := ctx.LoadModuleInline("format", "caddy.logging.encoders", cl.EncoderRaw)
+ if err != nil {
+ return fmt.Errorf("loading log encoder module: %v", err)
+ }
+ cl.EncoderRaw = nil // allow GC to deallocate
+ cl.encoder = val.(zapcore.Encoder)
+ }
+ if cl.encoder == nil {
+ cl.encoder = zapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig())
+ }
+
+ if cl.WriterRaw != nil {
+ val, err := ctx.LoadModuleInline("output", "caddy.logging.writers", cl.WriterRaw)
+ if err != nil {
+ return fmt.Errorf("loading log writer module: %v", err)
+ }
+ cl.WriterRaw = nil // allow GC to deallocate
+ cl.writerOpener = val.(WriterOpener)
+ }
+ if cl.writerOpener == nil {
+ cl.writerOpener = StderrWriter{}
+ }
+ var err error
+ cl.writer, _, err = logging.openWriter(cl.writerOpener)
+ if err != nil {
+ return fmt.Errorf("opening log writer using %#v: %v", cl.writerOpener, err)
+ }
+
+ cl.buildCore()
+
+ return nil
+}
+
+func (cl *CustomLog) buildCore() {
+ // logs which only discard their output don't need
+ // to perform encoding or any other processing steps
+ // at all, so just shortcut to a nop core instead
+ if _, ok := cl.writerOpener.(*DiscardWriter); ok {
+ cl.core = zapcore.NewNopCore()
+ return
+ }
+ c := zapcore.NewCore(
+ cl.encoder,
+ zapcore.AddSync(cl.writer),
+ cl.levelEnabler,
+ )
+ if cl.Sampling != nil {
+ if cl.Sampling.Interval == 0 {
+ cl.Sampling.Interval = 1 * time.Second
+ }
+ if cl.Sampling.First == 0 {
+ cl.Sampling.First = 100
+ }
+ if cl.Sampling.Thereafter == 0 {
+ cl.Sampling.Thereafter = 100
+ }
+ c = zapcore.NewSampler(c, cl.Sampling.Interval,
+ cl.Sampling.First, cl.Sampling.Thereafter)
+ }
+ cl.core = c
+}
+
+func (cl *CustomLog) matchesModule(moduleName string) bool {
+ return cl.loggerAllowed(moduleName, true)
+}
+
+// loggerAllowed returns true if name is allowed to emit
+// to cl. isModule should be true if name is the name of
+// a module and you want to see if ANY of that module's
+// logs would be permitted.
+func (cl *CustomLog) loggerAllowed(name string, isModule bool) bool {
+ // accept all loggers by default
+ if len(cl.Include) == 0 && len(cl.Exclude) == 0 {
+ return true
+ }
+
+ // append a dot so that partial names don't match
+ // (i.e. we don't want "foo.b" to match "foo.bar"); we
+ // will also have to append a dot when we do HasPrefix
+ // below to compensate for when namespaces are equal
+ if name != "" && name != "*" && name != "." {
+ name += "."
+ }
+
+ var longestAccept, longestReject int
+
+ if len(cl.Include) > 0 {
+ for _, namespace := range cl.Include {
+ var hasPrefix bool
+ if isModule {
+ hasPrefix = strings.HasPrefix(namespace+".", name)
+ } else {
+ hasPrefix = strings.HasPrefix(name, namespace+".")
+ }
+ if hasPrefix && len(namespace) > longestAccept {
+ longestAccept = len(namespace)
+ }
+ }
+ // the include list was populated, meaning that
+ // a match in this list is absolutely required
+ // if we are to accept the entry
+ if longestAccept == 0 {
+ return false
+ }
+ }
+
+ if len(cl.Exclude) > 0 {
+ for _, namespace := range cl.Exclude {
+ // * == all logs emitted by modules
+ // . == all logs emitted by core
+ if (namespace == "*" && name != ".") ||
+ (namespace == "." && name == ".") {
+ return false
+ }
+ if strings.HasPrefix(name, namespace+".") &&
+ len(namespace) > longestReject {
+ longestReject = len(namespace)
+ }
+ }
+ // the reject list is populated, so we have to
+ // reject this entry if its match is better
+ // than the best from the accept list
+ if longestReject > longestAccept {
+ return false
+ }
+ }
+
+ return (longestAccept > longestReject) ||
+ (len(cl.Include) == 0 && longestReject == 0)
+}
+
+// filteringCore filters log entries based on logger name,
+// according to the rules of a CustomLog.
+type filteringCore struct {
+ zapcore.Core
+ cl *CustomLog
+}
+
+// With properly wraps With.
+func (fc *filteringCore) With(fields []zapcore.Field) zapcore.Core {
+ return &filteringCore{
+ Core: fc.Core.With(fields),
+ cl: fc.cl,
+ }
+}
+
+// Check only allows the log entry if its logger name
+// is allowed from the include/exclude rules of fc.cl.
+func (fc *filteringCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+ if fc.cl.loggerAllowed(e.LoggerName, false) {
+ return fc.Core.Check(e, ce)
+ }
+ return ce
+}
+
+// LogSampling configures log entry sampling.
+type LogSampling struct {
+ Interval time.Duration `json:"interval,omitempty"`
+ First int `json:"first,omitempty"`
+ Thereafter int `json:"thereafter,omitempty"`
+}
+
+type (
+ // StdoutWriter can write logs to stdout.
+ StdoutWriter struct{}
+
+ // StderrWriter can write logs to stderr.
+ StderrWriter struct{}
+
+ // DiscardWriter discards all writes.
+ DiscardWriter struct{}
+)
+
+// CaddyModule returns the Caddy module information.
+func (StdoutWriter) CaddyModule() ModuleInfo {
+ return ModuleInfo{
+ Name: "caddy.logging.writers.stdout",
+ New: func() Module { return new(StdoutWriter) },
+ }
+}
+
+// CaddyModule returns the Caddy module information.
+func (StderrWriter) CaddyModule() ModuleInfo {
+ return ModuleInfo{
+ Name: "caddy.logging.writers.stderr",
+ New: func() Module { return new(StderrWriter) },
+ }
+}
+
+// CaddyModule returns the Caddy module information.
+func (DiscardWriter) CaddyModule() ModuleInfo {
+ return ModuleInfo{
+ Name: "caddy.logging.writers.discard",
+ New: func() Module { return new(DiscardWriter) },
+ }
+}
+
+func (StdoutWriter) String() string { return "stdout" }
+func (StderrWriter) String() string { return "stderr" }
+func (DiscardWriter) String() string { return "discard" }
+
+// WriterKey returns a unique key representing stdout.
+func (StdoutWriter) WriterKey() string { return "std:out" }
+
+// WriterKey returns a unique key representing stderr.
+func (StderrWriter) WriterKey() string { return "std:err" }
+
+// WriterKey returns a unique key representing discard.
+func (DiscardWriter) WriterKey() string { return "discard" }
+
+// OpenWriter returns os.Stdout that can't be closed.
+func (StdoutWriter) OpenWriter() (io.WriteCloser, error) {
+ return notClosable{os.Stdout}, nil
+}
+
+// OpenWriter returns os.Stderr that can't be closed.
+func (StderrWriter) OpenWriter() (io.WriteCloser, error) {
+ return notClosable{os.Stderr}, nil
+}
+
+// OpenWriter returns ioutil.Discard that can't be closed.
+func (DiscardWriter) OpenWriter() (io.WriteCloser, error) {
+ return notClosable{ioutil.Discard}, nil
+}
+
+// notClosable is an io.WriteCloser that can't be closed.
+type notClosable struct{ io.Writer }
+
+func (fc notClosable) Close() error { return nil }
+
+type defaultCustomLog struct {
+ *CustomLog
+ logger *zap.Logger
+}
+
+// newDefaultProductionLog configures a custom log that is
+// intended for use by default if no other log is specified
+// in a config. It writes to stderr, uses the console encoder,
+// and enables INFO-level logs and higher.
+func newDefaultProductionLog() (*defaultCustomLog, error) {
+ cl := new(CustomLog)
+ cl.writerOpener = StderrWriter{}
+ var err error
+ cl.writer, err = cl.writerOpener.OpenWriter()
+ if err != nil {
+ return nil, err
+ }
+ encCfg := zap.NewProductionEncoderConfig()
+ cl.encoder = zapcore.NewConsoleEncoder(encCfg)
+ cl.levelEnabler = zapcore.InfoLevel
+
+ cl.buildCore()
+
+ return &defaultCustomLog{
+ CustomLog: cl,
+ logger: zap.New(cl.core),
+ }, nil
+}
+
+// Log returns the current default logger.
+func Log() *zap.Logger {
+ defaultLoggerMu.RLock()
+ defer defaultLoggerMu.RUnlock()
+ return defaultLogger.logger
+}
+
+var (
+ defaultLogger, _ = newDefaultProductionLog()
+ defaultLoggerMu sync.RWMutex
+)
+
+var writers = NewUsagePool()
+
+// Interface guards
+var (
+ _ io.WriteCloser = (*notClosable)(nil)
+ _ WriterOpener = (*StdoutWriter)(nil)
+ _ WriterOpener = (*StderrWriter)(nil)
+)
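Tying the pieces in this file together, a logging config might look roughly like the following. The field names come from the struct tags above; writers are selected by their "output" key and encoders by their "format" key, matching the LoadModuleInline calls in the provision methods. The stdout/stderr/discard writer names are registered in this file, while the "json" encoder name and the "http.log.access" logger name are assumptions based on the modules/logging encoders and the server logger names added elsewhere in this commit:

"logging": {
    "sink": {
        "writer": { "output": "stdout" }
    },
    "logs": {
        "default": { "level": "info" },
        "access": {
            "writer":  { "output": "stderr" },
            "encoder": { "format": "json" },
            "level":   "debug",
            "include": ["http.log.access"]
        }
    }
}

Per loggerAllowed, the "access" log above only receives entries whose logger name falls under http.log.access, while the default log (no include/exclude) still receives everything; any log whose writer is the discard module is dropped from the map entirely in openLogs.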
diff --git a/modules/caddyhttp/caddyhttp.go b/modules/caddyhttp/caddyhttp.go
index 29a5ab071..aae65abeb 100644
--- a/modules/caddyhttp/caddyhttp.go
+++ b/modules/caddyhttp/caddyhttp.go
@@ -21,7 +21,6 @@ import (
"encoding/json"
"fmt"
"io"
- "log"
weakrand "math/rand"
"net"
"net/http"
@@ -33,6 +32,7 @@ import (
"github.com/caddyserver/caddy/v2/modules/caddytls"
"github.com/lucas-clemente/quic-go/http3"
"github.com/mholt/certmagic"
+ "go.uber.org/zap"
)
func init() {
@@ -40,7 +40,7 @@ func init() {
err := caddy.RegisterModule(App{})
if err != nil {
- log.Fatal(err)
+ caddy.Log().Fatal(err.Error())
}
}
@@ -55,7 +55,8 @@ type App struct {
h3servers []*http3.Server
h3listeners []net.PacketConn
- ctx caddy.Context
+ ctx caddy.Context
+ logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
@@ -69,10 +70,15 @@ func (App) CaddyModule() caddy.ModuleInfo {
// Provision sets up the app.
func (app *App) Provision(ctx caddy.Context) error {
app.ctx = ctx
+ app.logger = ctx.Logger(app)
repl := caddy.NewReplacer()
- for _, srv := range app.Servers {
+ for srvName, srv := range app.Servers {
+ srv.logger = app.logger.Named("log")
+ srv.accessLogger = app.logger.Named("log.access")
+ srv.errorLogger = app.logger.Named("log.error")
+
if srv.AutoHTTPS == nil {
// avoid nil pointer dereferences
srv.AutoHTTPS = new(AutoHTTPSConfig)
@@ -93,20 +99,25 @@ func (app *App) Provision(ctx caddy.Context) error {
}
for i := range srv.Listen {
- srv.Listen[i] = repl.ReplaceAll(srv.Listen[i], "")
+ lnOut, err := repl.ReplaceOrErr(srv.Listen[i], true, true)
+ if err != nil {
+ return fmt.Errorf("server %s, listener %d: %v",
+ srvName, i, err)
+ }
+ srv.Listen[i] = lnOut
}
if srv.Routes != nil {
err := srv.Routes.Provision(ctx)
if err != nil {
- return fmt.Errorf("setting up server routes: %v", err)
+ return fmt.Errorf("server %s: setting up server routes: %v", srvName, err)
}
}
if srv.Errors != nil {
err := srv.Errors.Routes.Provision(ctx)
if err != nil {
- return fmt.Errorf("setting up server error handling routes: %v", err)
+ return fmt.Errorf("server %s: setting up server error handling routes: %v", srvName, err)
}
}
@@ -194,7 +205,9 @@ func (app *App) Start() error {
/////////
// TODO: HTTP/3 support is experimental for now
if srv.ExperimentalHTTP3 {
- log.Printf("[INFO] Enabling experimental HTTP/3 listener on %s", addr)
+ app.logger.Info("enabling experimental HTTP/3 listener",
+ zap.String("addr", addr),
+ )
h3ln, err := caddy.ListenPacket("udp", addr)
if err != nil {
return fmt.Errorf("getting HTTP/3 UDP listener: %v", err)
@@ -284,19 +297,25 @@ func (app *App) automaticHTTPS() error {
// skip if all listeners use the HTTP port
if !srv.listenersUseAnyPortOtherThan(app.httpPort()) {
- log.Printf("[INFO] Server %s is only listening on the HTTP port %d, so no automatic HTTPS will be applied to this server",
- srvName, app.httpPort())
+ app.logger.Info("server is only listening on the HTTP port, so no automatic HTTPS will be applied to this server",
+ zap.String("server_name", srvName),
+ zap.Int("http_port", app.httpPort()),
+ )
continue
}
// find all qualifying domain names, de-duplicated
domainSet := make(map[string]struct{})
- for _, route := range srv.Routes {
- for _, matcherSet := range route.MatcherSets {
- for _, m := range matcherSet {
+ for routeIdx, route := range srv.Routes {
+ for matcherSetIdx, matcherSet := range route.MatcherSets {
+ for matcherIdx, m := range matcherSet {
if hm, ok := m.(*MatchHost); ok {
- for _, d := range *hm {
- d = repl.ReplaceAll(d, "")
+ for hostMatcherIdx, d := range *hm {
+ d, err = repl.ReplaceOrErr(d, true, false)
+ if err != nil {
+ return fmt.Errorf("%s: route %d, matcher set %d, matcher %d, host matcher %d: %v",
+ srvName, routeIdx, matcherSetIdx, matcherIdx, hostMatcherIdx, err)
+ }
if certmagic.HostQualifies(d) &&
!srv.AutoHTTPS.Skipped(d, srv.AutoHTTPS.Skip) {
domainSet[d] = struct{}{}
@@ -318,7 +337,10 @@ func (app *App) automaticHTTPS() error {
// supposed to ignore loaded certificates
if !srv.AutoHTTPS.IgnoreLoadedCerts &&
len(tlsApp.AllMatchingCertificates(d)) > 0 {
- log.Printf("[INFO][%s] Skipping automatic certificate management because one or more matching certificates are already loaded", d)
+ app.logger.Info("skipping automatic certificate management because one or more matching certificates are already loaded",
+ zap.String("domain", d),
+ zap.String("server_name", srvName),
+ )
continue
}
domainsForCerts = append(domainsForCerts, d)
@@ -351,7 +373,9 @@ func (app *App) automaticHTTPS() error {
})
// manage their certificates
- log.Printf("[INFO] Enabling automatic TLS certificate management for %v", domainsForCerts)
+ app.logger.Info("enabling automatic TLS certificate management",
+ zap.Strings("domains", domainsForCerts),
+ )
err := tlsApp.Manage(domainsForCerts)
if err != nil {
return fmt.Errorf("%s: managing certificate for %s: %s", srvName, domains, err)
@@ -368,7 +392,9 @@ func (app *App) automaticHTTPS() error {
continue
}
- log.Printf("[INFO] Enabling automatic HTTP->HTTPS redirects for %v", domains)
+ app.logger.Info("enabling automatic HTTP->HTTPS redirects",
+ zap.Strings("domains", domains),
+ )
// create HTTP->HTTPS redirects
for _, addr := range srv.Listen {
@@ -434,8 +460,10 @@ func (app *App) automaticHTTPS() error {
// that the redirect runs from; simply append our
// redirect route to the existing routes, with a
// caveat that their config might override ours
- log.Printf("[WARNING] Server %s is listening on %s, so automatic HTTP->HTTPS redirects might be overridden by your own configuration",
- srvName, addr)
+ app.logger.Warn("server is listening on same interface as redirects, so automatic HTTP->HTTPS redirects might be overridden by your own configuration",
+ zap.String("server_name", srvName),
+ zap.String("interface", addr),
+ )
srv.Routes = append(srv.Routes, redirRoute)
continue redirRoutesLoop
}
@@ -524,8 +552,8 @@ var emptyHandler HandlerFunc = func(http.ResponseWriter, *http.Request) error {
// WeakString is a type that unmarshals any JSON value
// as a string literal, with the following exceptions:
-// 1) actual string values are decoded as strings, and
-// 2) null is decoded as empty string
+// 1) actual string values are decoded as strings; and
+// 2) null is decoded as empty string;
// and provides methods for getting the value as various
// primitive types. However, using this type removes any
// type safety as far as deserializing JSON is concerned.
diff --git a/modules/caddyhttp/fileserver/staticfiles.go b/modules/caddyhttp/fileserver/staticfiles.go
index 3e4ccccb7..26c0efe90 100644
--- a/modules/caddyhttp/fileserver/staticfiles.go
+++ b/modules/caddyhttp/fileserver/staticfiles.go
@@ -18,6 +18,7 @@ import (
"bytes"
"fmt"
"html/template"
+ "io"
weakrand "math/rand"
"mime"
"net/http"
@@ -191,6 +192,24 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, _ cadd
}
}
+ // if this handler exists in an error context (i.e. is
+ // part of a handler chain that is supposed to handle
+ // a previous error), we have to serve the content
+ // manually in order to write the correct status code
+ if reqErr, ok := r.Context().Value(caddyhttp.ErrorCtxKey).(error); ok {
+ statusCode := http.StatusInternalServerError
+ if handlerErr, ok := reqErr.(caddyhttp.HandlerError); ok {
+ if handlerErr.StatusCode > 0 {
+ statusCode = handlerErr.StatusCode
+ }
+ }
+ w.WriteHeader(statusCode)
+ if r.Method != "HEAD" {
+ io.Copy(w, file)
+ }
+ return nil
+ }
+
// let the standard library do what it does best; note, however,
// that errors generated by ServeContent are written immediately
// to the response, so we cannot handle them (but errors there
diff --git a/modules/caddyhttp/marshalers.go b/modules/caddyhttp/marshalers.go
new file mode 100644
index 000000000..a6721327b
--- /dev/null
+++ b/modules/caddyhttp/marshalers.go
@@ -0,0 +1,89 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "crypto/tls"
+ "net/http"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// LoggableHTTPRequest makes an HTTP request loggable with zap.Object().
+type LoggableHTTPRequest struct{ *http.Request }
+
+// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
+func (r LoggableHTTPRequest) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ enc.AddString("method", r.Method)
+ enc.AddString("uri", r.RequestURI)
+ enc.AddString("proto", r.Proto)
+ enc.AddString("remote_addr", r.RemoteAddr)
+ enc.AddString("host", r.Host)
+ enc.AddObject("headers", LoggableHTTPHeader(r.Header))
+ if r.TLS != nil {
+ enc.AddObject("tls", LoggableTLSConnState(*r.TLS))
+ }
+ return nil
+}
+
+// LoggableHTTPHeader makes an HTTP header loggable with zap.Object().
+type LoggableHTTPHeader http.Header
+
+// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
+func (h LoggableHTTPHeader) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ if h == nil {
+ return nil
+ }
+ for key, val := range h {
+ enc.AddArray(key, LoggableStringArray(val))
+ }
+ return nil
+}
+
+// LoggableStringArray makes a slice of strings marshalable for logging.
+type LoggableStringArray []string
+
+// MarshalLogArray satisfies the zapcore.ArrayMarshaler interface.
+func (sa LoggableStringArray) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+ if sa == nil {
+ return nil
+ }
+ for _, s := range sa {
+ enc.AppendString(s)
+ }
+ return nil
+}
+
+// LoggableTLSConnState makes a TLS connection state loggable with zap.Object().
+type LoggableTLSConnState tls.ConnectionState
+
+// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
+func (t LoggableTLSConnState) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ enc.AddBool("resumed", t.DidResume)
+ enc.AddUint16("version", t.Version)
+ enc.AddUint16("resumed", t.CipherSuite)
+ enc.AddString("proto", t.NegotiatedProtocol)
+ enc.AddBool("proto_mutual", t.NegotiatedProtocolIsMutual)
+ enc.AddString("server_name", t.ServerName)
+ return nil
+}
+
+// Interface guards
+var (
+ _ zapcore.ObjectMarshaler = (*LoggableHTTPRequest)(nil)
+ _ zapcore.ObjectMarshaler = (*LoggableHTTPHeader)(nil)
+ _ zapcore.ArrayMarshaler = (*LoggableStringArray)(nil)
+ _ zapcore.ObjectMarshaler = (*LoggableTLSConnState)(nil)
+)
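These marshalers let a handler attach an entire request to a structured log entry as one field instead of formatting it by hand; the rewrite and server changes below use the same pattern. A minimal sketch (the helper function is hypothetical):

package example

import (
    "net/http"

    "github.com/caddyserver/caddy/v2/modules/caddyhttp"
    "go.uber.org/zap"
)

// logRequest emits one structured entry that nests the method,
// URI, headers, and TLS details of r under the "request" field.
func logRequest(logger *zap.Logger, r *http.Request) {
    logger.Info("incoming request",
        zap.Object("request", caddyhttp.LoggableHTTPRequest{Request: r}),
    )
}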
diff --git a/modules/caddyhttp/replacer.go b/modules/caddyhttp/replacer.go
index db329be52..ddbc25061 100644
--- a/modules/caddyhttp/replacer.go
+++ b/modules/caddyhttp/replacer.go
@@ -19,7 +19,6 @@ import (
"net"
"net/http"
"net/textproto"
- "net/url"
"path"
"strconv"
"strings"
@@ -112,27 +111,30 @@ func addHTTPVarsToReplacer(repl caddy.Replacer, req *http.Request, w http.Respon
}
return qs, true
- // original URI, before any internal changes
+ // original request, before any internal changes
+ case "http.request.orig_method":
+ or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
+ return or.Method, true
case "http.request.orig_uri":
- u, _ := req.Context().Value(OriginalURLCtxKey).(url.URL)
- return u.RequestURI(), true
+ or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
+ return or.RequestURI, true
case "http.request.orig_uri.path":
- u, _ := req.Context().Value(OriginalURLCtxKey).(url.URL)
- return u.Path, true
+ or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
+ return or.URL.Path, true
case "http.request.orig_uri.path.file":
- u, _ := req.Context().Value(OriginalURLCtxKey).(url.URL)
- _, file := path.Split(u.Path)
+ or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
+ _, file := path.Split(or.URL.Path)
return file, true
case "http.request.orig_uri.path.dir":
- u, _ := req.Context().Value(OriginalURLCtxKey).(url.URL)
- dir, _ := path.Split(u.Path)
+ or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
+ dir, _ := path.Split(or.URL.Path)
return dir, true
case "http.request.orig_uri.query":
- u, _ := req.Context().Value(OriginalURLCtxKey).(url.URL)
- return u.RawQuery, true
+ or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
+ return or.URL.RawQuery, true
case "http.request.orig_uri.query_string":
- u, _ := req.Context().Value(OriginalURLCtxKey).(url.URL)
- qs := u.Query().Encode()
+ or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
+ qs := or.URL.Query().Encode()
if qs != "" {
qs = "?" + qs
}
@@ -183,7 +185,7 @@ func addHTTPVarsToReplacer(repl caddy.Replacer, req *http.Request, w http.Respon
// middleware variables
if strings.HasPrefix(key, varsReplPrefix) {
varName := key[len(varsReplPrefix):]
- tbl := req.Context().Value(VarCtxKey).(map[string]interface{})
+ tbl := req.Context().Value(VarsCtxKey).(map[string]interface{})
raw, ok := tbl[varName]
if !ok {
// variables can be dynamic, so always return true
diff --git a/modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go b/modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go
index eaf1f8644..21aeb1759 100644
--- a/modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go
+++ b/modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go
@@ -19,7 +19,6 @@ import (
"crypto/tls"
"fmt"
"net/http"
- "net/url"
"path"
"path/filepath"
"strconv"
@@ -53,6 +52,9 @@ type Transport struct {
// with the value of SplitPath. The first piece will be assumed as the
// actual resource (CGI script) name, and the second piece will be set to
// PATH_INFO for the CGI script to use.
+ // Future enhancements should be careful to avoid CVE-2019-11043,
+ // which can be mitigated with use of a try_files-like behavior
+ // that 404's if the fastcgi path info is not found.
SplitPath string `json:"split_path,omitempty"`
// Extra environment variables
@@ -191,12 +193,13 @@ func (t Transport) buildEnv(r *http.Request) (map[string]string, error) {
// original URI in as the value of REQUEST_URI (the user can overwrite this
// if desired). Most PHP apps seem to want the original URI. Besides, this is
// how nginx defaults: http://stackoverflow.com/a/12485156/1048862
- reqURL, ok := r.Context().Value(caddyhttp.OriginalURLCtxKey).(url.URL)
+ origReq, ok := r.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)
if !ok {
// some requests, like active health checks, don't add this to
// the request context, so we can just use the current URL
- reqURL = *r.URL
+ origReq = *r
}
+ reqURL := origReq.URL
requestScheme := "http"
if r.TLS != nil {
diff --git a/modules/caddyhttp/reverseproxy/reverseproxy.go b/modules/caddyhttp/reverseproxy/reverseproxy.go
index 566c9c4bb..c2fb751ab 100644
--- a/modules/caddyhttp/reverseproxy/reverseproxy.go
+++ b/modules/caddyhttp/reverseproxy/reverseproxy.go
@@ -441,6 +441,15 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, di Dia
}
}
+ // TODO: there should be an option to return an error if the response
+ // matches some criteria; would solve https://github.com/caddyserver/caddy/issues/1447
+ // by allowing the backend to determine whether this server should treat
+ // a 400+ status code as an error -- but we might need to be careful that
+ // we do not affect the health status of the backend... still looking into
+ // that; if we need to avoid that, we should return a particular error type
+ // that the caller of this function checks for and only applies health
+ // status changes if the error is not this special type
+
rw.WriteHeader(res.StatusCode)
err = h.copyResponse(rw, res.Body, h.flushInterval(req, res))
diff --git a/modules/caddyhttp/rewrite/rewrite.go b/modules/caddyhttp/rewrite/rewrite.go
index 7d4c6f70b..5a84a3317 100644
--- a/modules/caddyhttp/rewrite/rewrite.go
+++ b/modules/caddyhttp/rewrite/rewrite.go
@@ -23,6 +23,7 @@ import (
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "go.uber.org/zap"
)
func init() {
@@ -39,6 +40,8 @@ type Rewrite struct {
HTTPRedirect caddyhttp.WeakString `json:"http_redirect,omitempty"`
Rehandle bool `json:"rehandle,omitempty"`
+
+ logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
@@ -49,6 +52,12 @@ func (Rewrite) CaddyModule() caddy.ModuleInfo {
}
}
+// Provision sets up rewr.
+func (rewr *Rewrite) Provision(ctx caddy.Context) error {
+ rewr.logger = ctx.Logger(rewr)
+ return nil
+}
+
// Validate ensures rewr's configuration is valid.
func (rewr Rewrite) Validate() error {
if rewr.HTTPRedirect != "" && rewr.Rehandle {
@@ -61,6 +70,10 @@ func (rewr Rewrite) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddy
repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)
var changed bool
+ logger := rewr.logger.With(
+ zap.Object("request", caddyhttp.LoggableHTTPRequest{Request: r}),
+ )
+
// rewrite the method
if rewr.Method != "" {
method := r.Method
@@ -114,18 +127,23 @@ func (rewr Rewrite) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddy
r.RequestURI = newURI
}
- if changed && rewr.Rehandle {
- return caddyhttp.ErrRehandle
- }
-
- if changed && rewr.HTTPRedirect != "" {
- statusCode, err := strconv.Atoi(repl.ReplaceAll(rewr.HTTPRedirect.String(), ""))
- if err != nil {
- return caddyhttp.Error(http.StatusInternalServerError, err)
+ if changed {
+ logger.Debug("rewrote request",
+ zap.String("method", r.Method),
+ zap.String("uri", r.RequestURI),
+ )
+ if rewr.Rehandle {
+ return caddyhttp.ErrRehandle
+ }
+ if rewr.HTTPRedirect != "" {
+ statusCode, err := strconv.Atoi(repl.ReplaceAll(rewr.HTTPRedirect.String(), ""))
+ if err != nil {
+ return caddyhttp.Error(http.StatusInternalServerError, err)
+ }
+ w.Header().Set("Location", r.RequestURI)
+ w.WriteHeader(statusCode)
+ return nil
}
- w.Header().Set("Location", r.RequestURI)
- w.WriteHeader(statusCode)
- return nil
}
return next.ServeHTTP(w, r)
diff --git a/modules/caddyhttp/server.go b/modules/caddyhttp/server.go
index 366a30788..2c0c44e68 100644
--- a/modules/caddyhttp/server.go
+++ b/modules/caddyhttp/server.go
@@ -17,16 +17,18 @@ package caddyhttp
import (
"context"
"fmt"
- "log"
"net"
"net/http"
"net/url"
"strconv"
"strings"
+ "time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddytls"
"github.com/lucas-clemente/quic-go/http3"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
)
// Server is an HTTP server.
@@ -43,11 +45,15 @@ type Server struct {
AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"`
MaxRehandles *int `json:"max_rehandles,omitempty"`
StrictSNIHost *bool `json:"strict_sni_host,omitempty"`
+ Logs *ServerLogConfig `json:"logs,omitempty"`
// This field is not subject to compatibility promises
ExperimentalHTTP3 bool `json:"experimental_http3,omitempty"`
- tlsApp *caddytls.TLS
+ tlsApp *caddytls.TLS
+ logger *zap.Logger
+ accessLogger *zap.Logger
+ errorLogger *zap.Logger
h3server *http3.Server
}
@@ -59,58 +65,106 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if s.h3server != nil {
err := s.h3server.SetQuicHeaders(w.Header())
if err != nil {
- log.Printf("[ERROR] Setting HTTP/3 Alt-Svc header: %v", err)
+ s.logger.Error("setting HTTP/3 Alt-Svc header", zap.Error(err))
}
}
- if s.tlsApp.HandleHTTPChallenge(w, r) {
- return
- }
-
// set up the context for the request
repl := caddy.NewReplacer()
ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl)
ctx = context.WithValue(ctx, ServerCtxKey, s)
- ctx = context.WithValue(ctx, VarCtxKey, make(map[string]interface{}))
- ctx = context.WithValue(ctx, OriginalURLCtxKey, cloneURL(r.URL))
+ ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]interface{}))
+ var url2 url.URL // avoid letting this escape to the heap
+ ctx = context.WithValue(ctx, OriginalRequestCtxKey, originalRequest(r, &url2))
r = r.WithContext(ctx)
// once the pointer to the request won't change
// anymore, finish setting up the replacer
addHTTPVarsToReplacer(repl, r, w)
- // build and execute the main handler chain
+ loggableReq := LoggableHTTPRequest{r}
+ errLog := s.errorLogger.With(
+ // encode the request for logging purposes before
+ // it enters any handler chain; this is necessary
+ // to capture the original request in case it gets
+ // modified during handling
+ zap.Object("request", loggableReq),
+ )
+
+ if s.Logs != nil {
+ wrec := NewResponseRecorder(w, nil, nil)
+ w = wrec
+ accLog := s.accessLogger.With(
+ // capture the original version of the request
+ zap.Object("request", loggableReq),
+ )
+ start := time.Now()
+ defer func() {
+ latency := time.Since(start)
+
+ repl.Set("http.response.status", strconv.Itoa(wrec.Status()))
+ repl.Set("http.response.size", strconv.Itoa(wrec.Size()))
+ repl.Set("http.response.latency", latency.String())
+
+ logger := accLog
+ if s.Logs.LoggerNames != nil {
+ logger = logger.Named(s.Logs.LoggerNames[r.Host])
+ }
+
+ log := logger.Info
+ if wrec.Status() >= 400 {
+ log = logger.Error
+ }
+
+ log("request",
+ zap.String("common_log", repl.ReplaceAll(CommonLogFormat, "-")),
+ zap.Duration("latency", latency),
+ zap.Int("size", wrec.Size()),
+ zap.Int("status", wrec.Status()),
+ )
+ }()
+ }
+
+ // make sure ACME HTTP challenges are handled; do so
+ // separately from any user-defined handlers
+ if s.tlsApp.HandleHTTPChallenge(w, r) {
+ return
+ }
+
+ // build and execute the primary handler chain
err := s.executeCompositeRoute(w, r, s.Routes)
if err != nil {
- // add the raw error value to the request context
- // so it can be accessed by error handlers
- c := context.WithValue(r.Context(), ErrorCtxKey, err)
- r = r.WithContext(c)
-
- // add error values to the replacer
- repl.Set("http.error", err.Error())
- if handlerErr, ok := err.(HandlerError); ok {
- repl.Set("http.error.status_code", strconv.Itoa(handlerErr.StatusCode))
- repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode))
- repl.Set("http.error.message", handlerErr.Message)
- repl.Set("http.error.trace", handlerErr.Trace)
- repl.Set("http.error.id", handlerErr.ID)
+ // prepare the error log
+ logger := errLog
+ if s.Logs != nil && s.Logs.LoggerNames != nil {
+ logger = logger.Named(s.Logs.LoggerNames[r.Host])
}
+ // get the values that will be used to log the error
+ errStatus, errMsg, errFields := errLogValues(err)
+
+ // add HTTP error information to request context
+ r = s.Errors.WithError(r, err)
+
if s.Errors != nil && len(s.Errors.Routes) > 0 {
- err := s.executeCompositeRoute(w, r, s.Errors.Routes)
- if err != nil {
- // TODO: what should we do if the error handler has an error?
- log.Printf("[ERROR] [%s %s] handling error: %v", r.Method, r.RequestURI, err)
- }
- } else {
- // TODO: polish the default error handling
- log.Printf("[ERROR] [%s %s] %v", r.Method, r.RequestURI, err)
- if handlerErr, ok := err.(HandlerError); ok {
- w.WriteHeader(handlerErr.StatusCode)
+ // execute user-defined error handling route
+ err2 := s.executeCompositeRoute(w, r, s.Errors.Routes)
+ if err2 == nil {
+ // user's error route handled the error response
+ // successfully, so now just log the error
+ logger.Error(errMsg, errFields...)
} else {
- w.WriteHeader(http.StatusInternalServerError)
+ // well... this is awkward
+ errFields = append([]zapcore.Field{
+ zap.String("error", err2.Error()),
+ zap.Namespace("first_error"),
+ zap.String("msg", errMsg),
+ }, errFields...)
+ logger.Error("error handling handler error", errFields...)
}
+ } else {
+ logger.Error(errMsg, errFields...)
+ w.WriteHeader(errStatus)
}
}
}
@@ -278,26 +332,101 @@ type HTTPErrorConfig struct {
Routes RouteList `json:"routes,omitempty"`
}
+// WithError makes a shallow copy of r to add the error to its
+// context, and sets placeholders on the request's replacer
+// related to err. It returns the modified request which has
+// the error information in its context and replacer. It
+// overwrites any existing error values that are stored.
+func (*HTTPErrorConfig) WithError(r *http.Request, err error) *http.Request {
+ // add the raw error value to the request context
+ // so it can be accessed by error handlers
+ c := context.WithValue(r.Context(), ErrorCtxKey, err)
+ r = r.WithContext(c)
+
+ // add error values to the replacer
+ repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)
+ repl.Set("http.error", err.Error())
+ if handlerErr, ok := err.(HandlerError); ok {
+ repl.Set("http.error.status_code", strconv.Itoa(handlerErr.StatusCode))
+ repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode))
+ repl.Set("http.error.message", handlerErr.Message)
+ repl.Set("http.error.trace", handlerErr.Trace)
+ repl.Set("http.error.id", handlerErr.ID)
+ }
+
+ return r
+}
+
+// ServerLogConfig describes a server's logging configuration.
+type ServerLogConfig struct {
+ LoggerNames map[string]string `json:"logger_names,omitempty"`
+}
+
+// errLogValues inspects err and returns the status code
+// to use, the error log message, and any extra fields.
+// If err is a HandlerError, the returned values will
+// have richer information.
+func errLogValues(err error) (status int, msg string, fields []zapcore.Field) {
+ if handlerErr, ok := err.(HandlerError); ok {
+ status = handlerErr.StatusCode
+ msg = handlerErr.Err.Error()
+ fields = []zapcore.Field{
+ zap.Int("status", handlerErr.StatusCode),
+ zap.String("err_id", handlerErr.ID),
+ zap.String("err_trace", handlerErr.Trace),
+ }
+ return
+ }
+ status = http.StatusInternalServerError
+ msg = err.Error()
+ return
+}
+
+// originalRequest returns a partial, shallow copy of
+// req, including: req.Method, deep copy of req.URL
+// (into the urlCopy parameter, which should be on the
+// stack), and req.RequestURI. Notably, headers are not
+// copied. This function is designed to be very fast
+// and efficient, and useful primarily for read-only
+// logging purposes.
+func originalRequest(req *http.Request, urlCopy *url.URL) http.Request {
+ *urlCopy = *cloneURL(req.URL)
+ return http.Request{
+ Method: req.Method,
+ RequestURI: req.RequestURI,
+ URL: urlCopy,
+ }
+}
+
// cloneURL makes a copy of r.URL and returns a
// new value that doesn't reference the original.
-func cloneURL(u *url.URL) url.URL {
+func cloneURL(u *url.URL) *url.URL {
urlCopy := *u
if u.User != nil {
userInfo := new(url.Userinfo)
*userInfo = *u.User
urlCopy.User = userInfo
}
- return urlCopy
+ return &urlCopy
}
+const (
+ // CommonLogFormat is the common log format. https://en.wikipedia.org/wiki/Common_Log_Format
+ CommonLogFormat = `{http.request.remote.host} ` + CommonLogEmptyValue + ` {http.handlers.authentication.user.id} [{time.now.common_log}] "{http.request.orig_method} {http.request.orig_uri} {http.request.proto}" {http.response.status} {http.response.size}`
+
+ // CommonLogEmptyValue is the common empty log value.
+ CommonLogEmptyValue = "-"
+)
+
// Context keys for HTTP request context values.
const (
// For referencing the server instance
ServerCtxKey caddy.CtxKey = "server"
// For the request's variable table
- VarCtxKey caddy.CtxKey = "vars"
+ VarsCtxKey caddy.CtxKey = "vars"
- // For the unmodified URL that originally came in with a request
- OriginalURLCtxKey caddy.CtxKey = "original_url"
+ // For a partial copy of the unmodified request that
+ // originally came into the server's entry handler
+ OriginalRequestCtxKey caddy.CtxKey = "original_request"
)
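To illustrate the new Logs field, here is a minimal sketch (not part of the diff) of how a ServerLogConfig could be populated; the host name and logger name are hypothetical, and in practice this struct is unmarshaled from the server's JSON config:

package main

import "github.com/caddyserver/caddy/v2/modules/caddyhttp"

func exampleLogsConfig() *caddyhttp.ServerLogConfig {
    // Requests whose Host is "example.com" would be logged through the
    // access logger named "example"; other hosts use the unnamed access
    // logger (see ServeHTTP above, which looks up s.Logs.LoggerNames[r.Host]).
    return &caddyhttp.ServerLogConfig{
        LoggerNames: map[string]string{
            "example.com": "example",
        },
    }
}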
diff --git a/modules/caddyhttp/staticresp.go b/modules/caddyhttp/staticresp.go
index a435f8725..732a3fb47 100644
--- a/modules/caddyhttp/staticresp.go
+++ b/modules/caddyhttp/staticresp.go
@@ -112,7 +112,7 @@ func (s StaticResponse) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Hand
// write response body
if s.Body != "" {
- fmt.Fprint(w, repl.ReplaceAll(s.Body, ""))
+ fmt.Fprint(w, repl.ReplaceKnown(s.Body, ""))
}
return nil
diff --git a/modules/caddyhttp/subroute.go b/modules/caddyhttp/subroute.go
index 3b0d718b0..57fb80aff 100644
--- a/modules/caddyhttp/subroute.go
+++ b/modules/caddyhttp/subroute.go
@@ -30,8 +30,15 @@ func init() {
// matchers, or for routes with matchers that must have deferred
// evaluation (e.g. if they depend on placeholders created by other
// matchers that need to be evaluated first).
+//
+// You can also use subroutes to handle errors from specific handlers.
+// First the primary Routes will be executed; if they return an
+// error, the Errors routes will be executed. In that case, an error
+// is returned to the server's entry point only if the error routes
+// themselves return an error.
type Subroute struct {
- Routes RouteList `json:"routes,omitempty"`
+ Routes RouteList `json:"routes,omitempty"`
+ Errors *HTTPErrorConfig `json:"errors,omitempty"`
}
// CaddyModule returns the Caddy module information.
@@ -47,7 +54,13 @@ func (sr *Subroute) Provision(ctx caddy.Context) error {
if sr.Routes != nil {
err := sr.Routes.Provision(ctx)
if err != nil {
- return fmt.Errorf("setting up routes: %v", err)
+ return fmt.Errorf("setting up subroutes: %v", err)
+ }
+ if sr.Errors != nil {
+ err := sr.Errors.Routes.Provision(ctx)
+ if err != nil {
+ return fmt.Errorf("setting up error subroutes: %v", err)
+ }
}
}
return nil
@@ -55,7 +68,13 @@ func (sr *Subroute) Provision(ctx caddy.Context) error {
func (sr *Subroute) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Handler) error {
subroute := sr.Routes.BuildCompositeRoute(r)
- return subroute.ServeHTTP(w, r)
+ err := subroute.ServeHTTP(w, r)
+ if err != nil && sr.Errors != nil {
+ r = sr.Errors.WithError(r, err)
+ errRoute := sr.Errors.Routes.BuildCompositeRoute(r)
+ return errRoute.ServeHTTP(w, r)
+ }
+ return err
}
// Interface guards
diff --git a/modules/caddyhttp/vars.go b/modules/caddyhttp/vars.go
index bbd45680f..3fb8fa365 100644
--- a/modules/caddyhttp/vars.go
+++ b/modules/caddyhttp/vars.go
@@ -38,7 +38,7 @@ func (VarsMiddleware) CaddyModule() caddy.ModuleInfo {
}
func (t VarsMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
- vars := r.Context().Value(VarCtxKey).(map[string]interface{})
+ vars := r.Context().Value(VarsCtxKey).(map[string]interface{})
repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)
for k, v := range t {
keyExpanded := repl.ReplaceAll(k, "")
@@ -62,7 +62,7 @@ func (VarsMatcher) CaddyModule() caddy.ModuleInfo {
// Match matches a request based on variables in the context.
func (m VarsMatcher) Match(r *http.Request) bool {
- vars := r.Context().Value(VarCtxKey).(map[string]string)
+ vars := r.Context().Value(VarsCtxKey).(map[string]string)
repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)
for k, v := range m {
keyExpanded := repl.ReplaceAll(k, "")
diff --git a/modules/caddytls/tls.go b/modules/caddytls/tls.go
index 5054081ad..5dfe06391 100644
--- a/modules/caddytls/tls.go
+++ b/modules/caddytls/tls.go
@@ -18,7 +18,6 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
- "log"
"net/http"
"sync"
"time"
@@ -26,6 +25,7 @@ import (
"github.com/caddyserver/caddy/v2"
"github.com/go-acme/lego/v3/challenge"
"github.com/mholt/certmagic"
+ "go.uber.org/zap"
)
func init() {
@@ -43,6 +43,7 @@ type TLS struct {
ctx caddy.Context
storageCleanTicker *time.Ticker
storageCleanStop chan struct{}
+ logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
@@ -56,6 +57,7 @@ func (TLS) CaddyModule() caddy.ModuleInfo {
// Provision sets up the configuration for the TLS app.
func (t *TLS) Provision(ctx caddy.Context) error {
t.ctx = ctx
+ t.logger = ctx.Logger(t)
// set up a new certificate cache; this (re)loads all certificates
cacheOpts := certmagic.CacheOptions{
@@ -290,7 +292,7 @@ func (t *TLS) cleanStorageUnits() {
storageClean = time.Now()
- log.Println("[INFO] tls: Cleaned up storage unit(s)")
+ t.logger.Info("cleaned up storage units")
}
// CertificateLoader is a type that can load certificates.
diff --git a/modules/logging/encoders.go b/modules/logging/encoders.go
new file mode 100644
index 000000000..c3c8aba9d
--- /dev/null
+++ b/modules/logging/encoders.go
@@ -0,0 +1,268 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/caddyserver/caddy/v2"
+ zaplogfmt "github.com/jsternberg/zap-logfmt"
+ "go.uber.org/zap"
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/zapcore"
+)
+
+func init() {
+ caddy.RegisterModule(ConsoleEncoder{})
+ caddy.RegisterModule(JSONEncoder{})
+ caddy.RegisterModule(LogfmtEncoder{})
+ caddy.RegisterModule(StringEncoder{})
+}
+
+// ConsoleEncoder encodes log entries that are mostly human-readable.
+type ConsoleEncoder struct {
+ zapcore.Encoder
+ LogEncoderConfig
+}
+
+// CaddyModule returns the Caddy module information.
+func (ConsoleEncoder) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ Name: "caddy.logging.encoders.console",
+ New: func() caddy.Module { return new(ConsoleEncoder) },
+ }
+}
+
+// Provision sets up the encoder.
+func (ce *ConsoleEncoder) Provision(_ caddy.Context) error {
+ ce.Encoder = zapcore.NewConsoleEncoder(ce.ZapcoreEncoderConfig())
+ return nil
+}
+
+// JSONEncoder encodes entries as JSON.
+type JSONEncoder struct {
+ zapcore.Encoder
+ *LogEncoderConfig
+}
+
+// CaddyModule returns the Caddy module information.
+func (JSONEncoder) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ Name: "caddy.logging.encoders.json",
+ New: func() caddy.Module { return new(JSONEncoder) },
+ }
+}
+
+// Provision sets up the encoder.
+func (je *JSONEncoder) Provision(_ caddy.Context) error {
+ je.Encoder = zapcore.NewJSONEncoder(je.ZapcoreEncoderConfig())
+ return nil
+}
+
+// LogfmtEncoder encodes log entries as logfmt:
+// https://www.brandur.org/logfmt
+type LogfmtEncoder struct {
+ zapcore.Encoder
+ LogEncoderConfig
+}
+
+// CaddyModule returns the Caddy module information.
+func (LogfmtEncoder) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ Name: "caddy.logging.encoders.logfmt",
+ New: func() caddy.Module { return new(LogfmtEncoder) },
+ }
+}
+
+// Provision sets up the encoder.
+func (lfe *LogfmtEncoder) Provision(_ caddy.Context) error {
+ lfe.Encoder = zaplogfmt.NewEncoder(lfe.ZapcoreEncoderConfig())
+ return nil
+}
+
+// StringEncoder writes a log entry as the value of a single
+// string field from that entry, with no other encoding. This
+// is useful for custom, self-encoded log entries whose whole
+// payload lives in one field of the structured entry.
+type StringEncoder struct {
+ zapcore.Encoder
+ FieldName string `json:"field,omitempty"`
+ FallbackRaw json.RawMessage `json:"fallback,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (StringEncoder) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ Name: "caddy.logging.encoders.string",
+ New: func() caddy.Module { return new(StringEncoder) },
+ }
+}
+
+// Provision sets up the encoder.
+func (se *StringEncoder) Provision(ctx caddy.Context) error {
+ if se.FallbackRaw != nil {
+ val, err := ctx.LoadModuleInline("format", "caddy.logging.encoders", se.FallbackRaw)
+ if err != nil {
+ return fmt.Errorf("loading fallback encoder module: %v", err)
+ }
+ se.FallbackRaw = nil // allow GC to deallocate
+ se.Encoder = val.(zapcore.Encoder)
+ }
+ if se.Encoder == nil {
+ se.Encoder = nopEncoder{}
+ }
+ return nil
+}
+
+// Clone wraps the underlying encoder's Clone. This is
+// necessary because we implement our own EncodeEntry,
+// and if we simply let the embedded encoder's Clone
+// be promoted, it would return a clone of that, and
+// we'd lose our StringEncoder's EncodeEntry.
+func (se StringEncoder) Clone() zapcore.Encoder {
+ return StringEncoder{
+ Encoder: se.Encoder.Clone(),
+ FieldName: se.FieldName,
+ }
+}
+
+// EncodeEntry partially implements the zapcore.Encoder interface.
+func (se StringEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
+ for _, f := range fields {
+ if f.Key == se.FieldName {
+ buf := bufferpool.Get()
+ buf.AppendString(f.String)
+ if !strings.HasSuffix(f.String, "\n") {
+ buf.AppendByte('\n')
+ }
+ return buf, nil
+ }
+ }
+ if se.Encoder == nil {
+ return nil, fmt.Errorf("no fallback encoder defined")
+ }
+ return se.Encoder.EncodeEntry(ent, fields)
+}
+
+// LogEncoderConfig holds configuration common to most encoders.
+type LogEncoderConfig struct {
+ MessageKey *string `json:"message_key,omitempty"`
+ LevelKey *string `json:"level_key,omitempty"`
+ TimeKey *string `json:"time_key,omitempty"`
+ NameKey *string `json:"name_key,omitempty"`
+ CallerKey *string `json:"caller_key,omitempty"`
+ StacktraceKey *string `json:"stacktrace_key,omitempty"`
+ LineEnding *string `json:"line_ending,omitempty"`
+ TimeFormat string `json:"time_format,omitempty"`
+ DurationFormat string `json:"duration_format,omitempty"`
+ LevelFormat string `json:"level_format,omitempty"`
+}
+
+// ZapcoreEncoderConfig returns the equivalent zapcore.EncoderConfig.
+// If lec is nil, zap.NewProductionEncoderConfig() is returned.
+func (lec *LogEncoderConfig) ZapcoreEncoderConfig() zapcore.EncoderConfig {
+ cfg := zap.NewProductionEncoderConfig()
+ if lec == nil {
+ lec = new(LogEncoderConfig)
+ }
+ if lec.MessageKey != nil {
+ cfg.MessageKey = *lec.MessageKey
+ }
+ if lec.LevelKey != nil {
+ cfg.LevelKey = *lec.LevelKey
+ }
+ if lec.TimeKey != nil {
+ cfg.TimeKey = *lec.TimeKey
+ }
+ if lec.NameKey != nil {
+ cfg.NameKey = *lec.NameKey
+ }
+ if lec.CallerKey != nil {
+ cfg.CallerKey = *lec.CallerKey
+ }
+ if lec.StacktraceKey != nil {
+ cfg.StacktraceKey = *lec.StacktraceKey
+ }
+ if lec.LineEnding != nil {
+ cfg.LineEnding = *lec.LineEnding
+ }
+
+ // time format
+ var timeFormatter zapcore.TimeEncoder
+ switch lec.TimeFormat {
+ case "", "unix_seconds_float":
+ timeFormatter = zapcore.EpochTimeEncoder
+ case "unix_milli_float":
+ timeFormatter = zapcore.EpochMillisTimeEncoder
+ case "unix_nano":
+ timeFormatter = zapcore.EpochNanosTimeEncoder
+ case "iso8601":
+ timeFormatter = zapcore.ISO8601TimeEncoder
+ default:
+ timeFormat := lec.TimeFormat
+ switch lec.TimeFormat {
+ case "rfc3339":
+ timeFormat = time.RFC3339
+ case "rfc3339_nano":
+ timeFormat = time.RFC3339Nano
+ case "wall":
+ timeFormat = "2006/01/02 15:04:05"
+ case "wall_milli":
+ timeFormat = "2006/01/02 15:04:05.000"
+ case "wall_nano":
+ timeFormat = "2006/01/02 15:04:05.000000000"
+ }
+ timeFormatter = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) {
+ encoder.AppendString(ts.UTC().Format(timeFormat))
+ }
+ }
+ cfg.EncodeTime = timeFormatter
+
+ // duration format
+ var durFormatter zapcore.DurationEncoder
+ switch lec.DurationFormat {
+ case "", "seconds":
+ durFormatter = zapcore.SecondsDurationEncoder
+ case "nano":
+ durFormatter = zapcore.NanosDurationEncoder
+ case "string":
+ durFormatter = zapcore.StringDurationEncoder
+ }
+ cfg.EncodeDuration = durFormatter
+
+ // level format
+ var levelFormatter zapcore.LevelEncoder
+ switch lec.LevelFormat {
+ case "", "lower":
+ levelFormatter = zapcore.LowercaseLevelEncoder
+ case "upper":
+ levelFormatter = zapcore.CapitalLevelEncoder
+ case "color":
+ levelFormatter = zapcore.CapitalColorLevelEncoder
+ }
+ cfg.EncodeLevel = levelFormatter
+
+ return cfg
+}
+
+var bufferpool = buffer.NewPool()
+
+// Interface guards
+var (
+ _ zapcore.Encoder = (*ConsoleEncoder)(nil)
+ _ zapcore.Encoder = (*JSONEncoder)(nil)
+ _ zapcore.Encoder = (*LogfmtEncoder)(nil)
+ _ zapcore.Encoder = (*StringEncoder)(nil)
+)
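As a rough illustration of how LogEncoderConfig maps onto zap, a small sketch (not part of the diff) using one of the named time formats handled above; the only assumption is that zap's production defaults fill in whatever is not overridden:

package main

import (
    "fmt"

    "github.com/caddyserver/caddy/v2/modules/logging"
)

func main() {
    // "rfc3339" is one of the named formats handled above; any other
    // value would be treated as a literal Go time layout string.
    lec := &logging.LogEncoderConfig{TimeFormat: "rfc3339"}
    cfg := lec.ZapcoreEncoderConfig()

    // Keys not overridden keep zap's production defaults,
    // e.g. the timestamp key is "ts".
    fmt.Println(cfg.TimeKey)
}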
diff --git a/modules/logging/filewriter.go b/modules/logging/filewriter.go
new file mode 100644
index 000000000..29e805ee7
--- /dev/null
+++ b/modules/logging/filewriter.go
@@ -0,0 +1,91 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/caddyserver/caddy/v2"
+ "gopkg.in/natefinch/lumberjack.v2"
+)
+
+func init() {
+ caddy.RegisterModule(FileWriter{})
+}
+
+// FileWriter can write logs to files.
+type FileWriter struct {
+ Filename string `json:"filename,omitempty"`
+ Roll *bool `json:"roll,omitempty"`
+ RollSizeMB int `json:"roll_size_mb,omitempty"`
+ RollCompress *bool `json:"roll_gzip,omitempty"`
+ RollLocalTime bool `json:"roll_local_time,omitempty"`
+ RollKeep int `json:"roll_keep,omitempty"`
+ RollKeepDays int `json:"roll_keep_days,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (FileWriter) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ Name: "caddy.logging.writers.file",
+ New: func() caddy.Module { return new(FileWriter) },
+ }
+}
+
+func (fw FileWriter) String() string {
+ fpath, err := filepath.Abs(fw.Filename)
+ if err == nil {
+ return fpath
+ }
+ return fw.Filename
+}
+
+// WriterKey returns a unique key representing this fw.
+func (fw FileWriter) WriterKey() string {
+ return "file:" + fw.Filename
+}
+
+// OpenWriter opens a new file writer.
+func (fw FileWriter) OpenWriter() (io.WriteCloser, error) {
+ // roll log files by default
+ if fw.Roll == nil || *fw.Roll {
+ if fw.RollSizeMB == 0 {
+ fw.RollSizeMB = 100
+ }
+ if fw.RollCompress == nil {
+ compress := true
+ fw.RollCompress = &compress
+ }
+ if fw.RollKeep == 0 {
+ fw.RollKeep = 10
+ }
+ if fw.RollKeepDays == 0 {
+ fw.RollKeepDays = 90
+ }
+ return &lumberjack.Logger{
+ Filename: fw.Filename,
+ MaxSize: fw.RollSizeMB,
+ MaxAge: fw.RollKeepDays,
+ MaxBackups: fw.RollKeep,
+ LocalTime: fw.RollLocalTime,
+ Compress: *fw.RollCompress,
+ }, nil
+ }
+
+ // otherwise just open a regular file
+ return os.OpenFile(fw.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+}
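A minimal sketch (not part of the diff) of using FileWriter directly; the log path is hypothetical, and leaving Roll unset exercises the rolling defaults set in OpenWriter:

package main

import "github.com/caddyserver/caddy/v2/modules/logging"

func main() {
    // With Roll left unset, the defaults above apply: roll at 100 MB,
    // gzip old files, keep 10 files for up to 90 days.
    fw := logging.FileWriter{Filename: "/var/log/caddy/access.log"}
    w, err := fw.OpenWriter()
    if err != nil {
        panic(err)
    }
    defer w.Close()
    _, _ = w.Write([]byte("hello, log\n"))
}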
diff --git a/modules/logging/filterencoder.go b/modules/logging/filterencoder.go
new file mode 100644
index 000000000..eff0279d6
--- /dev/null
+++ b/modules/logging/filterencoder.go
@@ -0,0 +1,321 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/caddyserver/caddy/v2"
+ "go.uber.org/zap"
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/zapcore"
+)
+
+func init() {
+ caddy.RegisterModule(FilterEncoder{})
+}
+
+// FilterEncoder wraps an underlying encoder. It does
+// not do any encoding itself, but it can manipulate
+// (filter) fields before they are actually encoded.
+// A wrapped encoder is required.
+type FilterEncoder struct {
+ WrappedRaw json.RawMessage `json:"wrap,omitempty"`
+ FieldsRaw map[string]json.RawMessage `json:"fields,omitempty"`
+
+ wrapped zapcore.Encoder
+ Fields map[string]LogFieldFilter `json:"-"`
+
+ // used to keep keys unique across nested objects
+ keyPrefix string
+}
+
+// CaddyModule returns the Caddy module information.
+func (FilterEncoder) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ Name: "caddy.logging.encoders.filter",
+ New: func() caddy.Module { return new(FilterEncoder) },
+ }
+}
+
+// Provision sets up the encoder.
+func (fe *FilterEncoder) Provision(ctx caddy.Context) error {
+ if fe.WrappedRaw == nil {
+ return fmt.Errorf("missing \"wrap\" (must specify an underlying encoder)")
+ }
+
+ // set up wrapped encoder (required)
+ val, err := ctx.LoadModuleInline("format", "caddy.logging.encoders", fe.WrappedRaw)
+ if err != nil {
+ return fmt.Errorf("loading fallback encoder module: %v", err)
+ }
+ fe.WrappedRaw = nil // allow GC to deallocate
+ fe.wrapped = val.(zapcore.Encoder)
+
+ // set up each field filter
+ if fe.Fields == nil {
+ fe.Fields = make(map[string]LogFieldFilter)
+ }
+ for field, filterRaw := range fe.FieldsRaw {
+ if filterRaw == nil {
+ continue
+ }
+ val, err := ctx.LoadModuleInline("filter", "caddy.logging.encoders.filter", filterRaw)
+ if err != nil {
+ return fmt.Errorf("loading log filter module: %v", err)
+ }
+ fe.Fields[field] = val.(LogFieldFilter)
+ }
+ fe.FieldsRaw = nil // allow GC to deallocate
+
+ return nil
+}
+
+// AddArray is part of the zapcore.ObjectEncoder interface.
+// Array elements do not get filtered.
+func (fe FilterEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error {
+ if filter, ok := fe.Fields[fe.keyPrefix+key]; ok {
+ filter.Filter(zap.Array(key, marshaler)).AddTo(fe.wrapped)
+ return nil
+ }
+ return fe.wrapped.AddArray(key, marshaler)
+}
+
+// AddObject is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddObject(key string, marshaler zapcore.ObjectMarshaler) error {
+ fe.keyPrefix += key + ">"
+ return fe.wrapped.AddObject(key, logObjectMarshalerWrapper{
+ enc: fe,
+ marsh: marshaler,
+ })
+}
+
+// AddBinary is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddBinary(key string, value []byte) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddBinary(key, value)
+ }
+}
+
+// AddByteString is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddByteString(key string, value []byte) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddByteString(key, value)
+ }
+}
+
+// AddBool is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddBool(key string, value bool) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddBool(key, value)
+ }
+}
+
+// AddComplex128 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddComplex128(key string, value complex128) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddComplex128(key, value)
+ }
+}
+
+// AddComplex64 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddComplex64(key string, value complex64) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddComplex64(key, value)
+ }
+}
+
+// AddDuration is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddDuration(key string, value time.Duration) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddDuration(key, value)
+ }
+}
+
+// AddFloat64 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddFloat64(key string, value float64) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddFloat64(key, value)
+ }
+}
+
+// AddFloat32 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddFloat32(key string, value float32) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddFloat32(key, value)
+ }
+}
+
+// AddInt is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddInt(key string, value int) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddInt(key, value)
+ }
+}
+
+// AddInt64 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddInt64(key string, value int64) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddInt64(key, value)
+ }
+}
+
+// AddInt32 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddInt32(key string, value int32) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddInt32(key, value)
+ }
+}
+
+// AddInt16 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddInt16(key string, value int16) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddInt16(key, value)
+ }
+}
+
+// AddInt8 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddInt8(key string, value int8) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddInt8(key, value)
+ }
+}
+
+// AddString is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddString(key, value string) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddString(key, value)
+ }
+}
+
+// AddTime is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddTime(key string, value time.Time) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddTime(key, value)
+ }
+}
+
+// AddUint is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddUint(key string, value uint) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddUint(key, value)
+ }
+}
+
+// AddUint64 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddUint64(key string, value uint64) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddUint64(key, value)
+ }
+}
+
+// AddUint32 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddUint32(key string, value uint32) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddUint32(key, value)
+ }
+}
+
+// AddUint16 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddUint16(key string, value uint16) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddUint16(key, value)
+ }
+}
+
+// AddUint8 is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddUint8(key string, value uint8) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddUint8(key, value)
+ }
+}
+
+// AddUintptr is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddUintptr(key string, value uintptr) {
+ if !fe.filtered(key, value) {
+ fe.wrapped.AddUintptr(key, value)
+ }
+}
+
+// AddReflected is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) AddReflected(key string, value interface{}) error {
+ if !fe.filtered(key, value) {
+ return fe.wrapped.AddReflected(key, value)
+ }
+ return nil
+}
+
+// OpenNamespace is part of the zapcore.ObjectEncoder interface.
+func (fe FilterEncoder) OpenNamespace(key string) {
+ fe.wrapped.OpenNamespace(key)
+}
+
+// Clone is part of the zapcore.ObjectEncoder interface.
+// We don't use it as of Oct 2019 (v2 beta 7), I'm not
+// really sure what it'd be useful for in our case.
+func (fe FilterEncoder) Clone() zapcore.Encoder {
+ return FilterEncoder{
+ Fields: fe.Fields,
+ wrapped: fe.wrapped.Clone(),
+ keyPrefix: fe.keyPrefix,
+ }
+}
+
+// EncodeEntry partially implements the zapcore.Encoder interface.
+func (fe FilterEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
+ // without this clone and storing it to fe.wrapped, fields
+ // from subsequent log entries get appended to previous
+ // ones, and I'm not 100% sure why; see end of
+ // https://github.com/uber-go/zap/issues/750
+ fe.wrapped = fe.wrapped.Clone()
+ for _, field := range fields {
+ field.AddTo(fe)
+ }
+ return fe.wrapped.EncodeEntry(ent, nil)
+}
+
+// filtered returns true if the field was filtered and
+// has already been added to the underlying encoder (so
+// the caller must not add it again). If it returns false,
+// the field has not been added to the underlying encoder.
+func (fe FilterEncoder) filtered(key string, value interface{}) bool {
+ filter, ok := fe.Fields[fe.keyPrefix+key]
+ if !ok {
+ return false
+ }
+ filter.Filter(zap.Any(key, value)).AddTo(fe.wrapped)
+ return true
+}
+
+// logObjectMarshalerWrapper allows us to recursively
+// filter fields of objects as they get encoded.
+type logObjectMarshalerWrapper struct {
+ enc FilterEncoder
+ marsh zapcore.ObjectMarshaler
+}
+
+// MarshalLogObject implements the zapcore.ObjectMarshaler interface.
+func (mom logObjectMarshalerWrapper) MarshalLogObject(_ zapcore.ObjectEncoder) error {
+ return mom.marsh.MarshalLogObject(mom.enc)
+}
+
+// Interface guards
+var (
+ _ zapcore.Encoder = (*FilterEncoder)(nil)
+ _ zapcore.ObjectMarshaler = (*logObjectMarshalerWrapper)(nil)
+)
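For a sense of how the pieces fit together, here is a sketch written as if inside the logging package (wrapped is unexported and is normally set by Provision from the "wrap" config); it wires a JSON encoder behind a FilterEncoder and masks one field. The field name and address are made up, and nested fields would be addressed with the ">" separator used by keyPrefix above:

package logging

import (
    "fmt"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func exampleFilterEncoder() {
    fe := FilterEncoder{
        wrapped: zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
        Fields: map[string]LogFieldFilter{
            // a nested field would be addressed as e.g. "request>remote"
            "remote_addr": IPMaskFilter{IPv4CIDR: 24, IPv6CIDR: 32},
        },
    }
    buf, err := fe.EncodeEntry(zapcore.Entry{Message: "request"}, []zapcore.Field{
        zap.String("remote_addr", "203.0.113.42:1234"),
    })
    if err == nil {
        fmt.Print(buf.String()) // remote_addr appears masked in the JSON output
    }
}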
diff --git a/modules/logging/filters.go b/modules/logging/filters.go
new file mode 100644
index 000000000..b44e08424
--- /dev/null
+++ b/modules/logging/filters.go
@@ -0,0 +1,94 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+ "net"
+
+ "github.com/caddyserver/caddy/v2"
+ "go.uber.org/zap/zapcore"
+)
+
+func init() {
+ caddy.RegisterModule(DeleteFilter{})
+ caddy.RegisterModule(IPMaskFilter{})
+}
+
+// LogFieldFilter can filter (or manipulate)
+// a field in a log entry.
+type LogFieldFilter interface {
+ Filter(zapcore.Field) zapcore.Field
+}
+
+// DeleteFilter is a Caddy log field filter that
+// deletes the field.
+type DeleteFilter struct{}
+
+// CaddyModule returns the Caddy module information.
+func (DeleteFilter) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ Name: "caddy.logging.encoders.filter.delete",
+ New: func() caddy.Module { return new(DeleteFilter) },
+ }
+}
+
+// Filter filters the input field.
+func (DeleteFilter) Filter(in zapcore.Field) zapcore.Field {
+ in.Type = zapcore.SkipType
+ return in
+}
+
+// IPMaskFilter is a Caddy log field filter that
+// masks IP addresses.
+type IPMaskFilter struct {
+ IPv4CIDR int `json:"ipv4_cidr,omitempty"`
+ IPv6CIDR int `json:"ipv6_cidr,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (IPMaskFilter) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ Name: "caddy.logging.encoders.filter.ip_mask",
+ New: func() caddy.Module { return new(IPMaskFilter) },
+ }
+}
+
+// Filter filters the input field.
+func (m IPMaskFilter) Filter(in zapcore.Field) zapcore.Field {
+ host, port, err := net.SplitHostPort(in.String)
+ if err != nil {
+ host = in.String // assume whole thing was IP address
+ }
+ ipAddr := net.ParseIP(host)
+ if ipAddr == nil {
+ return in
+ }
+ bitLen := 32
+ cidrPrefix := m.IPv4CIDR
+ if ipAddr.To4() == nil { // not an IPv4 address, so assume IPv6
+ bitLen = 128
+ cidrPrefix = m.IPv6CIDR
+ }
+ mask := net.CIDRMask(cidrPrefix, bitLen)
+ masked := ipAddr.Mask(mask)
+ if port == "" {
+ in.String = masked.String()
+ } else {
+ in.String = net.JoinHostPort(masked.String(), port)
+ }
+ return in
+}
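The filters can also be exercised on their own; a small sketch (addresses and field names are hypothetical):

package main

import (
    "fmt"

    "github.com/caddyserver/caddy/v2/modules/logging"
    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    // Mask an IPv6 value down to its first 32 bits.
    masked := logging.IPMaskFilter{IPv4CIDR: 24, IPv6CIDR: 32}.
        Filter(zap.String("remote_addr", "2001:db8::1234:5678"))
    fmt.Println(masked.String) // 2001:db8::

    // DeleteFilter marks the field as skipped, so encoders drop it entirely.
    dropped := logging.DeleteFilter{}.Filter(zap.String("cookie", "secret"))
    fmt.Println(dropped.Type == zapcore.SkipType) // true
}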
diff --git a/modules/logging/nopencoder.go b/modules/logging/nopencoder.go
new file mode 100644
index 000000000..fc3d70d98
--- /dev/null
+++ b/modules/logging/nopencoder.go
@@ -0,0 +1,114 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+ "time"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/zapcore"
+)
+
+// nopEncoder is a zapcore.Encoder that does nothing.
+type nopEncoder struct{}
+
+// AddArray is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error { return nil }
+
+// AddObject is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddObject(key string, marshaler zapcore.ObjectMarshaler) error { return nil }
+
+// AddBinary is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddBinary(key string, value []byte) {}
+
+// AddByteString is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddByteString(key string, value []byte) {}
+
+// AddBool is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddBool(key string, value bool) {}
+
+// AddComplex128 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddComplex128(key string, value complex128) {}
+
+// AddComplex64 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddComplex64(key string, value complex64) {}
+
+// AddDuration is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddDuration(key string, value time.Duration) {}
+
+// AddFloat64 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddFloat64(key string, value float64) {}
+
+// AddFloat32 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddFloat32(key string, value float32) {}
+
+// AddInt is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddInt(key string, value int) {}
+
+// AddInt64 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddInt64(key string, value int64) {}
+
+// AddInt32 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddInt32(key string, value int32) {}
+
+// AddInt16 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddInt16(key string, value int16) {}
+
+// AddInt8 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddInt8(key string, value int8) {}
+
+// AddString is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddString(key, value string) {}
+
+// AddTime is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddTime(key string, value time.Time) {}
+
+// AddUint is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddUint(key string, value uint) {}
+
+// AddUint64 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddUint64(key string, value uint64) {}
+
+// AddUint32 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddUint32(key string, value uint32) {}
+
+// AddUint16 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddUint16(key string, value uint16) {}
+
+// AddUint8 is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddUint8(key string, value uint8) {}
+
+// AddUintptr is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddUintptr(key string, value uintptr) {}
+
+// AddReflected is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) AddReflected(key string, value interface{}) error { return nil }
+
+// OpenNamespace is part of the zapcore.ObjectEncoder interface.
+func (nopEncoder) OpenNamespace(key string) {}
+
+// Clone is part of the zapcore.Encoder interface.
+// We don't use it as of Oct 2019 (v2 beta 7); I'm not
+// really sure what it'd be useful for in our case.
+func (ne nopEncoder) Clone() zapcore.Encoder { return ne }
+
+// EncodeEntry partially implements the zapcore.Encoder interface.
+func (nopEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
+ return bufferpool.Get(), nil
+}
+
+// Interface guard
+var _ zapcore.Encoder = (*nopEncoder)(nil)
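A sketch of StringEncoder's two paths, written as if inside the logging package since nopEncoder is unexported (Provision installs it as the default fallback); the field name and log line are made up:

package logging

import (
    "fmt"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func exampleStringEncoder() {
    se := StringEncoder{Encoder: nopEncoder{}, FieldName: "common_log"}

    // An entry carrying the configured field is written as that raw string.
    buf, _ := se.EncodeEntry(zapcore.Entry{}, []zapcore.Field{
        zap.String("common_log", `203.0.113.42 - - "GET / HTTP/1.1" 200 12`),
    })
    fmt.Print(buf.String())

    // An entry without that field falls through to the nop fallback,
    // which produces no output.
    empty, _ := se.EncodeEntry(zapcore.Entry{}, nil)
    fmt.Println(empty.Len() == 0) // true
}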
diff --git a/replacer.go b/replacer.go
index 420259bb1..c82d81dc0 100644
--- a/replacer.go
+++ b/replacer.go
@@ -15,6 +15,7 @@
package caddy
import (
+ "fmt"
"os"
"path/filepath"
"runtime"
@@ -28,6 +29,8 @@ type Replacer interface {
Delete(variable string)
Map(ReplacementFunc)
ReplaceAll(input, empty string) string
+ ReplaceKnown(input, empty string) string
+ ReplaceOrErr(input string, errOnEmpty, errOnUnknown bool) (string, error)
}
// NewReplacer returns a new Replacer.
@@ -70,12 +73,34 @@ func (r *replacer) fromStatic(key string) (val string, ok bool) {
return
}
+// ReplaceOrErr is like ReplaceAll, but an empty or
+// unrecognized placeholder causes an error to be returned,
+// according to errOnEmpty and errOnUnknown respectively.
+func (r *replacer) ReplaceOrErr(input string, errOnEmpty, errOnUnknown bool) (string, error) {
+ return r.replace(input, "", false, errOnEmpty, errOnUnknown)
+}
+
+// ReplaceKnown is like ReplaceAll but only replaces
+// placeholders that are known (recognized). Unrecognized
+// placeholders will remain in the output.
+func (r *replacer) ReplaceKnown(input, empty string) string {
+ out, _ := r.replace(input, empty, false, false, false)
+ return out
+}
+
// ReplaceAll efficiently replaces placeholders in input with
-// their values. Unrecognized placeholders will not be replaced.
-// Values that are empty string will be substituted with empty.
+// their values. All placeholders are replaced in the output
+// whether they are recognized or not. Values that are empty
+// string will be substituted with empty.
func (r *replacer) ReplaceAll(input, empty string) string {
+ out, _ := r.replace(input, empty, true, false, false)
+ return out
+}
+
+func (r *replacer) replace(input, empty string,
+ treatUnknownAsEmpty, errOnEmpty, errOnUnknown bool) (string, error) {
if !strings.Contains(input, string(phOpen)) {
- return input
+ return input, nil
}
var sb strings.Builder
@@ -100,24 +125,39 @@ func (r *replacer) ReplaceAll(input, empty string) string {
// trim opening bracket
key := input[i+1 : end]
- // try to get a value for this key; if
- // the key is not recognized, do not
- // perform any replacement
+ // try to get a value for this key,
+ // handle empty values accordingly
var found bool
for _, mapFunc := range r.providers {
if val, ok := mapFunc(key); ok {
found = true
- if val != "" {
+ if val == "" {
+ if errOnEmpty {
+ return "", fmt.Errorf("evaluated placeholder %s%s%s is empty",
+ string(phOpen), key, string(phClose))
+ } else if empty != "" {
+ sb.WriteString(empty)
+ }
+ } else {
sb.WriteString(val)
- } else if empty != "" {
- sb.WriteString(empty)
}
break
}
}
if !found {
- lastWriteCursor = i
- continue
+ // placeholder is unknown (unrecognized), handle accordingly
+ switch {
+ case errOnUnknown:
+ return "", fmt.Errorf("unrecognized placeholder %s%s%s",
+ string(phOpen), key, string(phClose))
+ case treatUnknownAsEmpty:
+ if empty != "" {
+ sb.WriteString(empty)
+ }
+ default:
+ lastWriteCursor = i
+ continue
+ }
}
// advance cursor to end of placeholder
@@ -128,7 +168,7 @@ func (r *replacer) ReplaceAll(input, empty string) string {
// flush any unwritten remainder
sb.WriteString(input[lastWriteCursor:])
- return sb.String()
+ return sb.String(), nil
}
// ReplacementFunc is a function that returns a replacement
@@ -142,8 +182,7 @@ func globalDefaultReplacements(key string) (string, bool) {
// check environment variable
const envPrefix = "env."
if strings.HasPrefix(key, envPrefix) {
- val := os.Getenv(key[len(envPrefix):])
- return val, val != ""
+ return os.Getenv(key[len(envPrefix):]), true
}
switch key {
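To make the three replacement modes concrete, a small sketch (placeholder names are hypothetical; it assumes Set from the rest of the Replacer interface):

package main

import (
    "fmt"

    "github.com/caddyserver/caddy/v2"
)

func main() {
    repl := caddy.NewReplacer()
    repl.Set("foo", "bar")
    repl.Set("blank", "")

    // ReplaceAll substitutes every placeholder, even unrecognized ones.
    fmt.Println(repl.ReplaceAll("{foo} {blank} {nope}", "-")) // bar - -

    // ReplaceKnown leaves unrecognized placeholders untouched.
    fmt.Println(repl.ReplaceKnown("{foo} {blank} {nope}", "-")) // bar - {nope}

    // ReplaceOrErr can turn empty and/or unknown placeholders into errors.
    _, err := repl.ReplaceOrErr("{nope}", true, true)
    fmt.Println(err != nil) // true
}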
diff --git a/replacer_test.go b/replacer_test.go
index a30a75d1c..435582bb6 100644
--- a/replacer_test.go
+++ b/replacer_test.go
@@ -85,7 +85,7 @@ func TestReplacerSet(t *testing.T) {
}
}
-func TestReplacerReplaceAll(t *testing.T) {
+func TestReplacerReplaceKnown(t *testing.T) {
rep := replacer{
providers: []ReplacementFunc{
// split our possible vars to two functions (to test if both functions are called)
@@ -148,7 +148,7 @@ func TestReplacerReplaceAll(t *testing.T) {
expected: "val1 {nope} test-123 ",
},
} {
- actual := rep.ReplaceAll(tc.testInput, "EMPTY")
+ actual := rep.ReplaceKnown(tc.testInput, "EMPTY")
// test if all are replaced as expected
if actual != tc.expected {
diff --git a/sigtrap.go b/sigtrap.go
index 76bb6664a..88dbb6da6 100644
--- a/sigtrap.go
+++ b/sigtrap.go
@@ -15,9 +15,10 @@
package caddy
import (
- "log"
"os"
"os/signal"
+
+ "go.uber.org/zap"
)
// TrapSignals create signal/interrupt handlers as best it can for the
@@ -41,11 +42,11 @@ func trapSignalsCrossPlatform() {
<-shutdown
if i > 0 {
- log.Println("[INFO] SIGINT: Force quit")
+ Log().Warn("force quit", zap.String("signal", "SIGINT"))
os.Exit(ExitCodeForceQuit)
}
- log.Println("[INFO] SIGINT: Shutting down")
+ Log().Info("shutting down", zap.String("signal", "SIGINT"))
go gracefulStop("SIGINT")
}
}()
@@ -57,11 +58,14 @@ func gracefulStop(sigName string) {
err := stopAndCleanup()
if err != nil {
- log.Printf("[ERROR] %s stop: %v", sigName, err)
+ Log().Error("stopping",
+ zap.String("signal", sigName),
+ zap.Error(err),
+ )
exitCode = ExitCodeFailedQuit
}
- log.Printf("[INFO] %s: Shutdown done", sigName)
+ Log().Info("shutdown done", zap.String("signal", sigName))
os.Exit(exitCode)
}
diff --git a/sigtrap_posix.go b/sigtrap_posix.go
index 63e6a31aa..4f081af98 100644
--- a/sigtrap_posix.go
+++ b/sigtrap_posix.go
@@ -17,12 +17,12 @@
package caddy
import (
- "log"
"os"
"os/signal"
"syscall"
"github.com/mholt/certmagic"
+ "go.uber.org/zap"
)
// trapSignalsPosix captures POSIX-only signals.
@@ -34,23 +34,23 @@ func trapSignalsPosix() {
for sig := range sigchan {
switch sig {
case syscall.SIGQUIT:
- log.Println("[INFO] SIGQUIT: Quitting process immediately")
+ Log().Info("quitting process immediately", zap.String("signal", "SIGQUIT"))
certmagic.CleanUpOwnLocks() // try to clean up locks anyway, it's important
os.Exit(ExitCodeForceQuit)
case syscall.SIGTERM:
- log.Println("[INFO] SIGTERM: Shutting down apps then terminating")
+ Log().Info("shutting down apps then terminating", zap.String("signal", "SIGTERM"))
gracefulStop("SIGTERM")
case syscall.SIGUSR1:
- log.Println("[INFO] SIGUSR1: Not implemented")
+ Log().Info("not implemented", zap.String("signal", "SIGUSR1"))
case syscall.SIGUSR2:
- log.Println("[INFO] SIGUSR2: Not implemented")
+ Log().Info("not implemented", zap.String("signal", "SIGUSR2"))
case syscall.SIGHUP:
// ignore; this signal is sometimes sent outside of the user's control
- log.Println("[INFO] SIGHUP: Not implemented")
+ Log().Info("not implemented", zap.String("signal", "SIGHUP"))
}
}
}()
diff --git a/usagepool.go b/usagepool.go
index dd4f60671..df69caffc 100644
--- a/usagepool.go
+++ b/usagepool.go
@@ -21,23 +21,136 @@ import (
)
// UsagePool is a thread-safe map that pools values
-// based on usage; a LoadOrStore operation increments
-// the usage, and a Delete decrements from the usage.
-// If the usage count reaches 0, the value will be
-// removed from the map. There is no way to overwrite
-// existing keys in the pool without first deleting
-// it as many times as it was stored. Deleting too
-// many times will panic.
+// based on usage (reference counting). Values are
+// only inserted if they do not already exist. There
+// are two ways to add values to the pool:
+//
+// 1) LoadOrStore will increment usage and store the
+// value immediately if it does not already exist
+// 2) LoadOrNew will increment usage and construct the
+// value immediately if it does not already exist,
+// then store that value in the pool. When the
+// constructed value is finally deleted from the
+// pool (after its usage reaches 0), it will be
+// cleaned up by calling its Destruct method.
+//
+// The use of LoadOrNew allows values to be created
+// and reused and finally cleaned up only once, even
+// though they may have many references throughout
+// their lifespan. This is helpful, for example, when
+// sharing thread-safe io.Writers that you only want
+// to open and close once.
+//
+// There is no way to overwrite existing keys in the
+// pool without first deleting it as many times as it
+// was stored. Deleting too many times will panic.
+//
+// The implementation does not use a sync.Map because
+// UsagePool needs additional atomicity to run the
+// constructor functions when creating a new value
+// with LoadOrNew. (We could probably use sync.Map
+// but we'd still have to layer our own additional
+// locks on top.)
//
// An empty UsagePool is NOT safe to use; always call
-// NewUsagePool() to make a new value.
+// NewUsagePool() to make a new one.
type UsagePool struct {
- pool *sync.Map
+ sync.RWMutex
+ pool map[interface{}]*usagePoolVal
}
-// NewUsagePool returns a new usage pool.
+// NewUsagePool returns a new usage pool that is ready to use.
func NewUsagePool() *UsagePool {
- return &UsagePool{pool: new(sync.Map)}
+ return &UsagePool{
+ pool: make(map[interface{}]*usagePoolVal),
+ }
+}
+
+// LoadOrNew loads the value associated with key from the pool if it
+// already exists. If the key doesn't exist, it calls construct
+// to create a new value and then stores that value in the pool. An
+// error is returned only if the constructor returns an error. The
+// loaded or constructed value is returned. The loaded return value
+// is true if the value already existed and was loaded, or false if
+// it was newly constructed.
+func (up *UsagePool) LoadOrNew(key interface{}, construct Constructor) (value interface{}, loaded bool, err error) {
+ var upv *usagePoolVal
+ up.Lock()
+ upv, loaded = up.pool[key]
+ if loaded {
+ atomic.AddInt32(&upv.refs, 1)
+ up.Unlock()
+ upv.RLock()
+ value = upv.value
+ err = upv.err
+ upv.RUnlock()
+ } else {
+ upv = &usagePoolVal{refs: 1}
+ upv.Lock()
+ up.pool[key] = upv
+ up.Unlock()
+ value, err = construct()
+ if err == nil {
+ upv.value = value
+ } else {
+ // TODO: remove error'ed entries from map
+ upv.err = err
+ }
+ upv.Unlock()
+ }
+ return
+}
+
+// LoadOrStore loads the value associated with key from the pool if it
+// already exists, or stores val if it does not exist. It returns the
+// value that was either loaded or stored, and true if the value already
+// existed and was loaded, or false if val was newly stored.
+func (up *UsagePool) LoadOrStore(key, val interface{}) (value interface{}, loaded bool) {
+ var upv *usagePoolVal
+ up.Lock()
+ upv, loaded = up.pool[key]
+ if loaded {
+ atomic.AddInt32(&upv.refs, 1)
+ up.Unlock()
+ upv.Lock()
+ if upv.err == nil {
+ value = upv.value
+ } else {
+ upv.value = val
+ upv.err = nil
+ }
+ upv.Unlock()
+ } else {
+ upv = &usagePoolVal{refs: 1, value: val}
+ up.pool[key] = upv
+ up.Unlock()
+ value = val
+ }
+ return
+}
+
+// Range iterates the pool similarly to how sync.Map.Range() does:
+// it calls f for every key in the pool, and if f returns false,
+// iteration is stopped. Ranging does not affect usage counts.
+//
+// This method is somewhat naive and acquires a read lock on the
+// entire pool during iteration, so do your best to make f() really
+// fast, m'kay?
+func (up *UsagePool) Range(f func(key, value interface{}) bool) {
+ up.RLock()
+ defer up.RUnlock()
+ for key, upv := range up.pool {
+ upv.RLock()
+ if upv.err != nil {
+ upv.RUnlock()
+ continue
+ }
+ val := upv.value
+ upv.RUnlock()
+ if !f(key, val) {
+ break
+ }
+ }
}
// Delete decrements the usage count for key and removes the
@@ -45,50 +158,47 @@ func NewUsagePool() *UsagePool {
// true if the usage count reached 0 and the value was deleted.
// It panics if the usage count drops below 0; always call
// Delete precisely as many times as LoadOrStore or LoadOrNew.
-func (up *UsagePool) Delete(key interface{}) (deleted bool) {
- usageVal, ok := up.pool.Load(key)
+func (up *UsagePool) Delete(key interface{}) (deleted bool, err error) {
+ up.Lock()
+ upv, ok := up.pool[key]
if !ok {
- return false
+ up.Unlock()
+ return false, nil
}
- upv := usageVal.(*usagePoolVal)
- newUsage := atomic.AddInt32(&upv.usage, -1)
- if newUsage == 0 {
- up.pool.Delete(key)
- return true
- } else if newUsage < 0 {
- panic(fmt.Sprintf("deleted more than stored: %#v (usage: %d)",
- upv.value, upv.usage))
- }
- return false
-}
-
-// LoadOrStore puts val in the pool and returns false if key does
-// not already exist; otherwise if the key exists, it loads the
-// existing value, increments the usage for that value, and returns
-// the value along with true.
-func (up *UsagePool) LoadOrStore(key, val interface{}) (actual interface{}, loaded bool) {
- usageVal := &usagePoolVal{
- usage: 1,
- value: val,
- }
- actual, loaded = up.pool.LoadOrStore(key, usageVal)
- if loaded {
- upv := actual.(*usagePoolVal)
- actual = upv.value
- atomic.AddInt32(&upv.usage, 1)
+ refs := atomic.AddInt32(&upv.refs, -1)
+ if refs == 0 {
+ delete(up.pool, key)
+ up.Unlock()
+ upv.RLock()
+ val := upv.value
+ upv.RUnlock()
+ if destructor, ok := val.(Destructor); ok {
+ err = destructor.Destruct()
+ }
+ deleted = true
+ } else {
+ up.Unlock()
+ if refs < 0 {
+ panic(fmt.Sprintf("deleted more than stored: %#v (usage: %d)",
+ upv.value, upv.refs))
+ }
}
return
}
-// Range iterates the pool the same way sync.Map.Range does.
-// This does not affect usage counts.
-func (up *UsagePool) Range(f func(key, value interface{}) bool) {
- up.pool.Range(func(key, value interface{}) bool {
- return f(key, value.(*usagePoolVal).value)
- })
+// Constructor is a function that returns a new value
+// that can destruct itself when it is no longer needed.
+type Constructor func() (Destructor, error)
+
+// Destructor is a value that can clean itself up when
+// it is deallocated.
+type Destructor interface {
+ Destruct() error
}
type usagePoolVal struct {
- usage int32 // accessed atomically; must be 64-bit aligned for 32-bit systems
+ refs int32 // accessed atomically
value interface{}
+ err error
+ sync.RWMutex
}
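Finally, a sketch of the LoadOrNew/Destructor lifecycle described above, sharing one file handle across two users of the pool; the key and wrapper type are made up for illustration:

package main

import (
    "fmt"
    "os"

    "github.com/caddyserver/caddy/v2"
)

// sharedFile wraps *os.File so it can clean itself up when the
// pool's usage count reaches zero.
type sharedFile struct{ *os.File }

func (sf sharedFile) Destruct() error { return sf.Close() }

func main() {
    pool := caddy.NewUsagePool()

    open := func() (caddy.Destructor, error) {
        f, err := os.CreateTemp("", "caddy-example-*")
        if err != nil {
            return nil, err
        }
        return sharedFile{f}, nil
    }

    // The first caller constructs the value; the second just loads it.
    v1, loaded1, _ := pool.LoadOrNew("log-file", open)
    v2, loaded2, _ := pool.LoadOrNew("log-file", open)
    fmt.Println(loaded1, loaded2, v1 == v2) // false true true

    // Each LoadOrNew must be matched by a Delete; the final Delete
    // calls Destruct, which closes the file.
    pool.Delete("log-file")
    deleted, _ := pool.Delete("log-file")
    fmt.Println(deleted) // true
}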