 .gitignore                  |    7 +
 config/config.go            |  190 +
 config/directives.go        |  257 +
 config/lexer.go             |  146 +
 config/parser.go            |   84 +
 config/parsing.go           |   90 +
 main.go                     |   39 +
 middleware/extensionless.go |   44 +
 middleware/gzip.go          |   40 +
 middleware/headers.go       |   35 +
 middleware/log.go           |   42 +
 middleware/middleware.go    |   11 +
 middleware/redirect.go      |   23 +
 middleware/rewrite.go       |   23 +
 middleware/util_recorder.go |   45 +
 middleware/util_replacer.go |   91 +
 server/server.go            |  176 +
 17 files changed, 1343 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..07a41df41
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+.DS_Store
+Thumbs.db
+_gitignore/
+error.log
+access.log
+/*.conf
+Caddyfile
\ No newline at end of file
diff --git a/config/config.go b/config/config.go
new file mode 100644
index 000000000..cf5214d92
--- /dev/null
+++ b/config/config.go
@@ -0,0 +1,190 @@
+// Package config contains utilities and types necessary for
+// launching specially-configured server instances.
+package config
+
+import "os"
+
+// Load loads a configuration file, parses it,
+// and returns a slice of Config structs which
+// can be used to create and configure server
+// instances.
+func Load(filename string) ([]Config, error) {
+ p := parser{}
+ err := p.lexer.Load(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer p.lexer.Close()
+ return p.Parse()
+}
+
+// IsNotFound returns whether or not the error is
+// one which indicates that the configuration file
+// was not found. (Useful for checking the error
+// returned from Load).
+func IsNotFound(err error) bool {
+ return os.IsNotExist(err)
+}
+
+// Default makes a default configuration
+// that's empty except for root, host, and port,
+// which are essential for serving the cwd.
+func Default() []Config {
+ cfg := []Config{
+ Config{
+ Root: defaultRoot,
+ Host: defaultHost,
+ Port: defaultPort,
+ },
+ }
+ return cfg
+}
+
+// Config represents a server configuration. It
+// is populated by parsing a config file. (Use
+// the Load function.)
+type Config struct {
+ Host string
+ Port string
+ Root string
+ Gzip bool
+ RequestLog Log
+ ErrorLog Log
+ Rewrites []Rewrite
+ Redirects []Redirect
+ Extensions []string
+ ErrorPages map[int]string // Map of HTTP status code to filename
+ Headers []Headers
+ TLS TLSConfig
+}
+
+// Address returns the host:port of c as a string.
+func (c Config) Address() string {
+ return c.Host + ":" + c.Port
+}
+
+// Rewrite describes an internal location rewrite.
+type Rewrite struct {
+ From string
+ To string
+}
+
+// Redirect describes an HTTP redirect.
+type Redirect struct {
+ From string
+ To string
+ Code int
+}
+
+// Log represents the settings for a log.
+type Log struct {
+ Enabled bool
+ OutputFile string
+ Format string
+}
+
+// Headers groups a slice of HTTP headers by a URL pattern.
+type Headers struct {
+ Url string
+ Headers []Header
+}
+
+// Header represents a single HTTP header, simply a name and value.
+type Header struct {
+ Name string
+ Value string
+}
+
+// TLSConfig describes how TLS should be configured and used,
+// if at all. At least a certificate and key are required.
+type TLSConfig struct {
+ Enabled bool
+ Certificate string
+ Key string
+}
+
+// httpRedirs is a map of supported HTTP redirect codes.
+var httpRedirs = map[string]int{
+ "300": 300,
+ "301": 301,
+ "302": 302,
+ "303": 303,
+ "304": 304,
+ "305": 305,
+ "306": 306,
+ "307": 307,
+ "308": 308,
+}
+
+// httpErrors is a map of supported HTTP error codes.
+var httpErrors = map[string]int{
+ "400": 400,
+ "401": 401,
+ "402": 402,
+ "403": 403,
+ "404": 404,
+ "405": 405,
+ "406": 406,
+ "407": 407,
+ "408": 408,
+ "409": 409,
+ "410": 410,
+ "411": 411,
+ "412": 412,
+ "413": 413,
+ "414": 414,
+ "415": 415,
+ "416": 416,
+ "417": 417,
+ "418": 418,
+ "419": 419,
+ "420": 420,
+ "422": 422,
+ "423": 423,
+ "424": 424,
+ "426": 426,
+ "428": 428,
+ "429": 429,
+ "431": 431,
+ "440": 440,
+ "444": 444,
+ "449": 449,
+ "450": 450,
+ "451": 451,
+ "494": 494,
+ "495": 495,
+ "496": 496,
+ "497": 497,
+ "498": 498,
+ "499": 499,
+ "500": 500,
+ "501": 501,
+ "502": 502,
+ "503": 503,
+ "504": 504,
+ "505": 505,
+ "506": 506,
+ "507": 507,
+ "508": 508,
+ "509": 509,
+ "510": 510,
+ "511": 511,
+ "520": 520,
+ "521": 521,
+ "522": 522,
+ "523": 523,
+ "524": 524,
+ "598": 598,
+ "599": 599,
+}
+
+const (
+ defaultHost = "localhost"
+ defaultPort = "8080"
+ defaultRoot = "."
+)
+
+const (
+ DefaultRequestsLog = "requests.log"
+ DefaultErrorsLog = "errors.log"
+)
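
The Load, IsNotFound, and Default functions above make up the package's entry point. A minimal sketch of how a caller might use them, assuming a Caddyfile in the working directory (this is essentially what main.go below does):

    package main

    import (
        "fmt"
        "log"

        "github.com/mholt/caddy/config"
    )

    func main() {
        configs, err := config.Load("Caddyfile")
        if err != nil {
            if config.IsNotFound(err) {
                // No config file: fall back to serving the cwd on localhost:8080.
                configs = config.Default()
            } else {
                log.Fatal(err)
            }
        }
        for _, c := range configs {
            fmt.Println(c.Address()) // "localhost:8080" for the default config
        }
    }
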
diff --git a/config/directives.go b/config/directives.go
new file mode 100644
index 000000000..01ddb3b19
--- /dev/null
+++ b/config/directives.go
@@ -0,0 +1,257 @@
+package config
+
+// dirFunc is a type of parsing function which processes
+// a particular directive and populates the config.
+type dirFunc func(*parser) error
+
+// validDirectives is a map of valid directive names to
+// their parsing function.
+var validDirectives map[string]dirFunc
+
+func init() {
+ // This has to be in the init function
+ // to avoid an initialization loop error because
+ // the 'import' directive (key) in this map
+ // invokes a method that uses this map.
+ validDirectives = map[string]dirFunc{
+ "root": func(p *parser) error {
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ p.cfg.Root = p.tkn()
+ return nil
+ },
+ "import": func(p *parser) error {
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+
+ p2 := parser{}
+ err := p2.lexer.Load(p.tkn())
+ if err != nil {
+ return p.err("Parse", err.Error())
+ }
+ defer p2.lexer.Close()
+
+ p2.cfg = p.cfg
+ err = p2.directives()
+ if err != nil {
+ return err
+ }
+ p.cfg = p2.cfg
+
+ return nil
+ },
+ "gzip": func(p *parser) error {
+ p.cfg.Gzip = true
+ return nil
+ },
+ "log": func(p *parser) error {
+ log := Log{Enabled: true}
+
+ // Get the type of log (requests, errors, etc.)
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ logWhat := p.tkn()
+
+ // Set the log output file
+ if p.lexer.NextArg() {
+ log.OutputFile = p.tkn()
+ }
+
+ // Set the log output format
+ if p.lexer.NextArg() {
+ log.Format = p.tkn()
+ }
+
+ switch logWhat {
+ case "requests":
+ if log.OutputFile == "" || log.OutputFile == "_" {
+ log.OutputFile = DefaultRequestsLog
+ }
+ p.cfg.RequestLog = log
+ case "errors":
+ if log.OutputFile == "" || log.OutputFile == "_" {
+ log.OutputFile = DefaultErrorsLog
+ }
+ p.cfg.ErrorLog = log
+ default:
+ return p.err("Parse", "Unknown log '"+logWhat+"'")
+ }
+
+ return nil
+ },
+ "rewrite": func(p *parser) error {
+ var rw Rewrite
+
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ rw.From = p.tkn()
+
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ rw.To = p.tkn()
+
+ p.cfg.Rewrites = append(p.cfg.Rewrites, rw)
+ return nil
+ },
+ "redir": func(p *parser) error {
+ var redir Redirect
+
+ // From
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ redir.From = p.tkn()
+
+ // To
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ redir.To = p.tkn()
+
+ // Status Code
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ if code, ok := httpRedirs[p.tkn()]; !ok {
+ return p.err("Parse", "Invalid redirect code '"+p.tkn()+"'")
+ } else {
+ redir.Code = code
+ }
+
+ p.cfg.Redirects = append(p.cfg.Redirects, redir)
+ return nil
+ },
+ "ext": func(p *parser) error {
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ p.cfg.Extensions = append(p.cfg.Extensions, p.tkn())
+ for p.lexer.NextArg() {
+ p.cfg.Extensions = append(p.cfg.Extensions, p.tkn())
+ }
+ return nil
+ },
+ "error": func(p *parser) error {
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ if code, ok := httpErrors[p.tkn()]; !ok {
+ return p.err("Syntax", "Invalid error code '"+p.tkn()+"'")
+ } else if val, exists := p.cfg.ErrorPages[code]; exists {
+ return p.err("Config", p.tkn()+" error page already configured to be '"+val+"'")
+ } else {
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ p.cfg.ErrorPages[code] = p.tkn()
+ }
+ return nil
+ },
+ "header": func(p *parser) error {
+ var head Headers
+ var isNewPattern bool
+
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ pattern := p.tkn()
+
+ // See if we already have a definition for this URL pattern...
+ for _, h := range p.cfg.Headers {
+ if h.Url == pattern {
+ head = h
+ break
+ }
+ }
+
+ // ...otherwise, this is a new pattern
+ if head.Url == "" {
+ head.Url = pattern
+ isNewPattern = true
+ }
+
+ processHeaderBlock := func() error {
+ err := p.openCurlyBrace()
+ if err != nil {
+ return err
+ }
+ for p.lexer.Next() {
+ if p.tkn() == "}" {
+ break
+ }
+ h := Header{Name: p.tkn()}
+ if p.lexer.NextArg() {
+ h.Value = p.tkn()
+ }
+ head.Headers = append(head.Headers, h)
+ }
+ err = p.closeCurlyBrace()
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // A single header could be declared on the same line, or
+ // multiple headers can be grouped by URL pattern, so we have
+ // to look for both here.
+ if p.lexer.NextArg() {
+ if p.tkn() == "{" {
+ err := processHeaderBlock()
+ if err != nil {
+ return err
+ }
+ } else {
+ h := Header{Name: p.tkn()}
+ if p.lexer.NextArg() {
+ h.Value = p.tkn()
+ }
+ head.Headers = append(head.Headers, h)
+ }
+ } else {
+ // Okay, it might be an opening curly brace on the next line
+ if !p.lexer.Next() {
+ return p.eofErr()
+ }
+ err := processHeaderBlock()
+ if err != nil {
+ return err
+ }
+ }
+
+ if isNewPattern {
+ p.cfg.Headers = append(p.cfg.Headers, head)
+ } else {
+ for i := 0; i < len(p.cfg.Headers); i++ {
+ if p.cfg.Headers[i].Url == pattern {
+ p.cfg.Headers[i] = head
+ break
+ }
+ }
+ }
+
+ return nil
+ },
+ "tls": func(p *parser) error {
+ tls := TLSConfig{Enabled: true}
+
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ tls.Certificate = p.tkn()
+
+ if !p.lexer.NextArg() {
+ return p.argErr()
+ }
+ tls.Key = p.tkn()
+
+ p.cfg.TLS = tls
+ return nil
+ },
+ }
+}
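
Taken together, these directive functions define the configuration vocabulary: root, import, gzip, log, rewrite, redir, ext, error, header, and tls. A hypothetical Caddyfile this parser should accept, with made-up paths and values, using the brace-enclosed form handled by addressBlock in parsing.go:

    localhost:8080 {
        root /var/www
        gzip
        log requests access.log
        log errors error.log
        ext .html .htm
        rewrite /old /new
        redir /github https://github.com/mholt/caddy 301
        error 404 404.html
        header /api {
            Access-Control-Allow-Origin *
        }
        tls cert.pem key.pem
    }
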
diff --git a/config/lexer.go b/config/lexer.go
new file mode 100644
index 000000000..b504de811
--- /dev/null
+++ b/config/lexer.go
@@ -0,0 +1,146 @@
+package config
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "unicode"
+)
+
+// lexer is a utility which can get values, token by
+// token, from a config file. A token is a word, and tokens
+// are separated by whitespace. A word can be enclosed in
+// quotes if it contains whitespace.
+type lexer struct {
+ file *os.File
+ reader *bufio.Reader
+ token token
+ line int
+}
+
+// Load opens a file and prepares to scan the file.
+func (l *lexer) Load(filename string) error {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ l.reader = bufio.NewReader(f)
+ l.file = f
+ l.line = 1
+ return nil
+}
+
+// Close closes the file.
+func (l *lexer) Close() {
+ l.file.Close()
+}
+
+// Next gets the next token from the input. The resulting token
+// is in l.token if Next returns true. If Next returns false,
+// there are no more tokens.
+func (l *lexer) Next() bool {
+ return l.next(true)
+}
+
+// NextArg works just like Next, but returns false if the next
+// token is not on the same line as the one before. This method
+// makes it easier to throw syntax errors when more values are
+// expected on the same line.
+func (l *lexer) NextArg() bool {
+ return l.next(false)
+}
+
+// next gets the next token according to newlineOK, which
+// specifies whether it's OK if the next token is on another
+// line. Returns true if there was a new token loaded, false
+// otherwise.
+func (l *lexer) next(newlineOK bool) bool {
+ var val []rune
+ var comment, quoted, escaped bool
+
+ makeToken := func() bool {
+ l.token.text = string(val)
+ return true
+ }
+
+ for {
+ ch, _, err := l.reader.ReadRune()
+ if err != nil {
+ if len(val) > 0 {
+ return makeToken()
+ }
+ if err == io.EOF {
+ return false
+ } else {
+ panic(err)
+ }
+ }
+
+ if quoted {
+ if !escaped {
+ if ch == '\\' {
+ escaped = true
+ continue
+ } else if ch == '"' {
+ quoted = false
+ return makeToken()
+ }
+ }
+ if ch == '\\' && !escaped {
+ escaped = true
+ continue
+ }
+ if ch == '\n' {
+ l.line++
+ }
+ val = append(val, ch)
+ escaped = false
+ continue
+ }
+
+ if unicode.IsSpace(ch) {
+ if ch == '\n' {
+ l.line++
+ comment = false
+ }
+ if len(val) > 0 {
+ return makeToken()
+ } else if !newlineOK {
+ err := l.reader.UnreadRune()
+ if err != nil {
+ panic(err)
+ }
+ if ch == '\n' {
+ l.line--
+ }
+ return false
+ }
+ continue
+ }
+
+ if ch == '#' {
+ comment = true
+ }
+
+ if comment {
+ continue
+ }
+
+ if len(val) == 0 {
+ l.token = token{line: l.line}
+ if ch == '"' {
+ quoted = true
+ continue
+ }
+ }
+
+ val = append(val, ch)
+ }
+}
+
+// A token represents a single valuable/processable unit
+// in a config file.
+type token struct {
+ line int
+ text string
+}
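
A hypothetical sketch of driving the lexer (it would have to live in package config, since the type is unexported); the helper name and file contents are invented:

    package config

    import "fmt"

    // dumpTokens is a hypothetical debugging helper, not part of this commit.
    func dumpTokens(filename string) error {
        var l lexer
        if err := l.Load(filename); err != nil {
            return err
        }
        defer l.Close()
        for l.Next() {
            // A quoted value such as "/var/www/my site" arrives as one token,
            // and anything after # on a line is skipped as a comment.
            fmt.Printf("line %d: %q\n", l.token.line, l.token.text)
        }
        return nil
    }
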
diff --git a/config/parser.go b/config/parser.go
new file mode 100644
index 000000000..1ccd7b910
--- /dev/null
+++ b/config/parser.go
@@ -0,0 +1,84 @@
+package config
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// parser is a type which can parse config files.
+type parser struct {
+ lexer lexer
+ cfg Config
+}
+
+// Parse parses the configuration file. It produces a slice of Config
+// structs which can be used to create and configure server instances.
+func (p *parser) Parse() ([]Config, error) {
+ var configs []Config
+
+ for p.lexer.Next() {
+ p.cfg = Config{ErrorPages: make(map[int]string)}
+
+ err := p.parse()
+ if err != nil {
+ return configs, err
+ }
+
+ configs = append(configs, p.cfg)
+ }
+
+ return configs, nil
+}
+
+// tkn is shorthand to get the text/value of the current token.
+func (p *parser) tkn() string {
+ return p.lexer.token.text
+}
+
+// line is shorthand to get the line number of the current token.
+func (p *parser) line() int {
+ return p.lexer.token.line
+}
+
+// syntaxErr creates a syntax error which explains what was
+// found and expected.
+func (p *parser) syntaxErr(expected string) error {
+ return p.err("Syntax", fmt.Sprintf("Unexpected token '%s', expecting '%s'", p.tkn(), expected))
+}
+
+// argErr creates a syntax error explaining that there
+// weren't enough arguments on the line.
+func (p *parser) argErr() error {
+ return p.err("Syntax", "Unexpected line break after '"+p.tkn()+"' (missing arguments?)")
+}
+
+// eofErr creates a syntax error describing an unexpected EOF.
+func (p *parser) eofErr() error {
+ return p.err("Syntax", "Unexpected EOF")
+}
+
+// err creates a "{{kind}} error: ..." with a custom message msg. The
+// file name and line number are included in the error message.
+func (p *parser) err(kind, msg string) error {
+ msg = fmt.Sprintf("%s error: %s:%d - %s", kind, p.lexer.file.Name(), p.line(), msg)
+ return errors.New(msg)
+}
+
+// parseAddress takes a host:port string (val), and returns the host
+// and port as separate values. If either value is missing, the
+// default will be used.
+func (p *parser) parseAddress(val string) (string, string) {
+ if val == "" {
+ return defaultHost, defaultPort
+ }
+ parts := strings.SplitN(val, ":", 3)
+ if parts[0] == "" {
+ parts[0] = defaultHost
+ }
+ if len(parts) == 1 || parts[1] == "" {
+ return parts[0], defaultPort
+ } else {
+ return parts[0], parts[1]
+ }
+}
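
Based on the defaults declared in config.go, parseAddress should behave roughly like this (illustrative values only):

    parseAddress("")                ->  "localhost", "8080"
    parseAddress("example.com")     ->  "example.com", "8080"
    parseAddress(":1234")           ->  "localhost", "1234"
    parseAddress("example.com:80")  ->  "example.com", "80"
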
diff --git a/config/parsing.go b/config/parsing.go
new file mode 100644
index 000000000..baa660864
--- /dev/null
+++ b/config/parsing.go
@@ -0,0 +1,90 @@
+package config
+
+// This file contains the recursive-descent parsing
+// functions.
+
+// parse is the top of the recursive-descent parsing.
+// It parses at most 1 server configuration (an address
+// and its directives).
+func (p *parser) parse() error {
+ err := p.address()
+ if err != nil {
+ return err
+ }
+
+ err = p.addressBlock()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// address expects that the current token is a host:port
+// combination.
+func (p *parser) address() error {
+ p.cfg.Host, p.cfg.Port = p.parseAddress(p.tkn())
+ p.lexer.Next()
+ return nil
+}
+
+// addressBlock leads into parsing directives. It
+// handles directives enclosed by curly braces and
+// directives not enclosed by curly braces.
+func (p *parser) addressBlock() error {
+ err := p.openCurlyBrace()
+ if err != nil {
+ // meh, single-server configs don't need curly braces
+ return p.directives()
+ }
+
+ err = p.directives()
+ if err != nil {
+ return err
+ }
+
+ err = p.closeCurlyBrace()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// openCurlyBrace expects the current token to be an
+// opening curly brace.
+func (p *parser) openCurlyBrace() error {
+ if p.tkn() != "{" {
+ return p.syntaxErr("{")
+ }
+ return nil
+}
+
+// closeCurlyBrace expects the current token to be
+// a closing curly brace.
+func (p *parser) closeCurlyBrace() error {
+ if p.tkn() != "}" {
+ return p.syntaxErr("}")
+ }
+ return nil
+}
+
+// directives parses through all the directives
+// and it expects the current token to be the first
+// directive. It goes until EOF or closing curly
+// brace.
+func (p *parser) directives() error {
+ for p.lexer.Next() {
+ if p.tkn() == "}" {
+ break
+ }
+ if fn, ok := validDirectives[p.tkn()]; !ok {
+ return p.syntaxErr("[directive]")
+ } else {
+ err := fn(p)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
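
Since Parse calls parse once per address, a file with several brace-enclosed address blocks produces one Config per block. A hypothetical two-server file (values invented):

    localhost:8080 {
        root /var/www/site1
        gzip
    }

    localhost:8081 {
        root /var/www/site2
        log requests access.log
    }
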
diff --git a/main.go b/main.go
new file mode 100644
index 000000000..88fdaa91f
--- /dev/null
+++ b/main.go
@@ -0,0 +1,39 @@
+package main
+
+import (
+ "log"
+ "sync"
+
+ "github.com/mholt/caddy/config"
+ "github.com/mholt/caddy/server"
+)
+
+func main() {
+ var wg sync.WaitGroup
+
+ vhosts, err := config.Load("Caddyfile")
+ if err != nil {
+ if config.IsNotFound(err) {
+ vhosts = config.Default()
+ } else {
+ log.Fatal(err)
+ }
+ }
+
+ for _, conf := range vhosts {
+ s, err := server.New(conf)
+ if err != nil {
+ log.Fatal(err)
+ }
+ wg.Add(1)
+ go func(s *server.Server) {
+ defer wg.Done()
+ err := s.Serve()
+ if err != nil {
+ s.Log(err)
+ }
+ }(s)
+ }
+
+ wg.Wait()
+}
diff --git a/middleware/extensionless.go b/middleware/extensionless.go
new file mode 100644
index 000000000..fb19aefe3
--- /dev/null
+++ b/middleware/extensionless.go
@@ -0,0 +1,44 @@
+package middleware
+
+import (
+ "net/http"
+ "os"
+ "strings"
+)
+
+// Extensionless is middleware for clean URLs. A root path is
+// passed in, as well as possible extensions to append, internally,
+// to requested paths. The first path+extension that matches an
+// existing resource will be used.
+func Extensionless(root string, extensions []string) Middleware {
+ resourceExists := func(path string) bool {
+ _, err := os.Stat(root + path)
+ // technically we should use os.IsNotExist(err)
+ // but we don't handle any other error types anyway
+ return err == nil
+ }
+
+ hasExt := func(r *http.Request) bool {
+ if r.URL.Path[len(r.URL.Path)-1] == '/' {
+ // directory
+ return true
+ }
+ lastSep := strings.LastIndex(r.URL.Path, "/")
+ lastDot := strings.LastIndex(r.URL.Path, ".")
+ return lastDot > lastSep
+ }
+
+ return func(next http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if !hasExt(r) {
+ for _, ext := range extensions {
+ if resourceExists(r.URL.Path + ext) {
+ r.URL.Path = r.URL.Path + ext
+ break
+ }
+ }
+ }
+ next(w, r)
+ }
+ }
+}
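
A minimal sketch of putting this middleware in front of a file server; the root path and extensions are hypothetical:

    package main

    import (
        "log"
        "net/http"

        "github.com/mholt/caddy/middleware"
    )

    func main() {
        root := "/var/www"
        fileServer := http.FileServer(http.Dir(root)).ServeHTTP
        clean := middleware.Extensionless(root, []string{".html", ".htm"})
        // A request for /about is served from /var/www/about.html if that file
        // exists; directories and paths that already have an extension pass
        // through unchanged.
        log.Fatal(http.ListenAndServe(":8080", clean(fileServer)))
    }
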
diff --git a/middleware/gzip.go b/middleware/gzip.go
new file mode 100644
index 000000000..ec01c8e3d
--- /dev/null
+++ b/middleware/gzip.go
@@ -0,0 +1,40 @@
+package middleware
+
+import (
+ "compress/gzip"
+ "io"
+ "net/http"
+ "strings"
+)
+
+// Adapted from https://gist.github.com/the42/1956518
+
+// Gzip is middleware that gzip-compresses the response.
+func Gzip(next http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+ next(w, r)
+ return
+ }
+ w.Header().Set("Content-Encoding", "gzip")
+ gzipWriter := gzip.NewWriter(w)
+ defer gzipWriter.Close()
+ gz := gzipResponseWriter{Writer: gzipWriter, ResponseWriter: w}
+ next(gz, r)
+ }
+}
+
+// gzipResponseWriter wraps the underlying Write method
+// with a gzip.Writer to compress the output.
+type gzipResponseWriter struct {
+ io.Writer
+ http.ResponseWriter
+}
+
+// Write wraps the underlying Write method to do compression.
+func (w gzipResponseWriter) Write(b []byte) (int, error) {
+ if w.Header().Get("Content-Type") == "" {
+ w.Header().Set("Content-Type", http.DetectContentType(b))
+ }
+ return w.Writer.Write(b)
+}
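
A small usage sketch, independent of the rest of the server: wrapping a plain handler so the response is compressed only when the client advertises gzip support.

    package main

    import (
        "log"
        "net/http"

        "github.com/mholt/caddy/middleware"
    )

    func hello(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("hello, world"))
    }

    func main() {
        // Clients without "gzip" in Accept-Encoding get the uncompressed response.
        log.Fatal(http.ListenAndServe(":8080", middleware.Gzip(hello)))
    }
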
diff --git a/middleware/headers.go b/middleware/headers.go
new file mode 100644
index 000000000..f47953c4b
--- /dev/null
+++ b/middleware/headers.go
@@ -0,0 +1,35 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/mholt/caddy/config"
+)
+
+// Headers is middleware that adds headers to the responses
+// for requests matching a certain path.
+func Headers(headers []config.Headers) Middleware {
+ return func(next http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ for _, rule := range headers {
+ if pathsMatch(r.URL.Path, rule.Url) {
+ for _, header := range rule.Headers {
+ w.Header().Set(header.Name, header.Value)
+ }
+ }
+ }
+ next(w, r)
+ }
+ }
+}
+
+// pathsMatch returns whether or not p1 and p2 are matching
+// paths. Matching can be defined a number of ways, and it
+// is not yet settled how URL/path strings should be compared.
+// It may be a prefix match or a full string match, and it
+// may strip trailing slashes. Until the software hits 1.0,
+// this will be in flux.
+func pathsMatch(p1, p2 string) bool {
+ return strings.HasPrefix(p1, p2)
+}
diff --git a/middleware/log.go b/middleware/log.go
new file mode 100644
index 000000000..5b2022871
--- /dev/null
+++ b/middleware/log.go
@@ -0,0 +1,42 @@
+package middleware
+
+import (
+ "log"
+ "net/http"
+)
+
+func RequestLog(logger *log.Logger, format string) Middleware {
+ if format == "" {
+ format = defaultReqLogFormat
+ }
+ return func(next http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ sw := newResponseRecorder(w)
+ next(sw, r)
+ rep := newReplacer(r, sw)
+ logger.Println(rep.replace(format))
+ }
+ }
+}
+
+// TODO.
+func ErrorLog(logger *log.Logger, format string) Middleware {
+ if format == "" {
+ format = defaultErrLogFormat
+ }
+ return func(next http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ sw := newResponseRecorder(w)
+ next(sw, r)
+ // This is still TODO -- we need to define what constitutes an error to be logged
+ //logger.Println("TODO")
+ }
+ }
+}
+
+const (
+ commonLogFormat = `{remote} ` + emptyStringReplacer + ` [{time}] "{method} {uri} {proto}" {status} {size}`
+ combinedLogFormat = commonLogFormat + ` "{>Referer}" "{>User-Agent}"`
+ defaultReqLogFormat = commonLogFormat
+ defaultErrLogFormat = "[TODO]"
+)
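
With the common log format above and the replacer defined in util_replacer.go, a request would be logged roughly like this (all values invented):

    127.0.0.1 - [09/Apr/2015:20:10:36 -0600] "GET /index.html HTTP/1.1" 200 1270

The combined format appends the Referer and User-Agent headers in quotes; any placeholder whose value is missing is rendered as "-".
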
diff --git a/middleware/middleware.go b/middleware/middleware.go
new file mode 100644
index 000000000..b29fd1e7b
--- /dev/null
+++ b/middleware/middleware.go
@@ -0,0 +1,11 @@
+// Package middleware includes a variety of middleware for
+// the servers to use, according to their configuration.
+package middleware
+
+import "net/http"
+
+// Middleware is a type of function that generates a new
+// layer of middleware. It is imperative that the HandlerFunc
+// being passed in is executed by the middleware, otherwise
+// part of the stack will not be called.
+type Middleware func(http.HandlerFunc) http.HandlerFunc
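
A sketch of composing layers of this Middleware type by hand; server.compile (later in this change) automates the same nesting from a slice. The noCache layer is invented for illustration:

    package main

    import (
        "log"
        "net/http"

        "github.com/mholt/caddy/middleware"
    )

    func main() {
        // A hand-written Middleware: it must call next, or every layer
        // beneath it (including the file server) is skipped.
        noCache := func(next http.HandlerFunc) http.HandlerFunc {
            return func(w http.ResponseWriter, r *http.Request) {
                w.Header().Set("Cache-Control", "no-cache")
                next(w, r)
            }
        }
        fileServer := http.FileServer(http.Dir(".")).ServeHTTP
        // The nesting reads outside-in: noCache runs first, then Gzip,
        // then the file server.
        log.Fatal(http.ListenAndServe(":8080", noCache(middleware.Gzip(fileServer))))
    }
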
diff --git a/middleware/redirect.go b/middleware/redirect.go
new file mode 100644
index 000000000..95b02609d
--- /dev/null
+++ b/middleware/redirect.go
@@ -0,0 +1,23 @@
+package middleware
+
+import (
+ "net/http"
+
+ "github.com/mholt/caddy/config"
+)
+
+// Redirect is middleware for redirecting certain requests
+// to other locations.
+func Redirect(redirs []config.Redirect) Middleware {
+ return func(next http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ for _, rule := range redirs {
+ if r.URL.Path == rule.From {
+ http.Redirect(w, r, rule.To, rule.Code)
+ break
+ }
+ }
+ next(w, r)
+ }
+ }
+}
diff --git a/middleware/rewrite.go b/middleware/rewrite.go
new file mode 100644
index 000000000..bffd9c20d
--- /dev/null
+++ b/middleware/rewrite.go
@@ -0,0 +1,23 @@
+package middleware
+
+import (
+ "net/http"
+
+ "github.com/mholt/caddy/config"
+)
+
+// Rewrite is middleware for rewriting requests internally to
+// a different path.
+func Rewrite(rewrites []config.Rewrite) Middleware {
+ return func(next http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ for _, rule := range rewrites {
+ if r.URL.Path == rule.From {
+ r.URL.Path = rule.To
+ break
+ }
+ }
+ next(w, r)
+ }
+ }
+}
diff --git a/middleware/util_recorder.go b/middleware/util_recorder.go
new file mode 100644
index 000000000..b66b48529
--- /dev/null
+++ b/middleware/util_recorder.go
@@ -0,0 +1,45 @@
+package middleware
+
+import "net/http"
+
+// responseRecorder is a type of ResponseWriter that captures
+// the status code written to it and also the size of the body
+// written in the response. A status code does not have
+// to be written, however, in which case 200 must be assumed.
+// It is best to have the constructor initialize this type
+// with that default status code.
+type responseRecorder struct {
+ http.ResponseWriter
+ status int
+ size int
+}
+
+// newResponseRecorder makes and returns a new responseRecorder,
+// which captures the HTTP Status code from the ResponseWriter
+// and also the length of the response body written through it.
+// Because a status is not set unless WriteHeader is called
+// explicitly, this constructor initializes with a status code
+// of 200 to cover the default case.
+func newResponseRecorder(w http.ResponseWriter) *responseRecorder {
+ return &responseRecorder{
+ ResponseWriter: w,
+ status: http.StatusOK,
+ }
+}
+
+// WriteHeader records the status code and calls the
+// underlying ResponseWriter's WriteHeader method.
+func (r *responseRecorder) WriteHeader(status int) {
+ r.status = status
+ r.ResponseWriter.WriteHeader(status)
+}
+
+// Write is a wrapper that records the size of the body
+// that gets written.
+func (r *responseRecorder) Write(buf []byte) (int, error) {
+ n, err := r.ResponseWriter.Write(buf)
+ if err == nil {
+ r.size += n
+ }
+ return n, err
+}
diff --git a/middleware/util_replacer.go b/middleware/util_replacer.go
new file mode 100644
index 000000000..1f15e8a2b
--- /dev/null
+++ b/middleware/util_replacer.go
@@ -0,0 +1,91 @@
+package middleware
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// replacer is a type which can replace placeholder
+// substrings in a string with actual values from a
+// http.Request and responseRecorder. Always use
+// newReplacer to get one of these.
+type replacer map[string]string
+
+// newReplacer makes a new replacer based on r and rw.
+// Do not create a new replacer until r and rw have all
+// the needed values, because this function copies those
+// values into the replacer.
+func newReplacer(r *http.Request, rw *responseRecorder) replacer {
+ rep := replacer{
+ "{method}": r.Method,
+ "{scheme}": func() string {
+ if r.TLS != nil {
+ return "https"
+ }
+ return "http"
+ }(),
+ "{host}": r.Host,
+ "{path}": r.URL.Path,
+ "{query}": r.URL.RawQuery,
+ "{fragment}": r.URL.Fragment,
+ "{proto}": r.Proto,
+ "{remote}": func() string {
+ if idx := strings.Index(r.RemoteAddr, ":"); idx > -1 {
+ return r.RemoteAddr[:idx] // IP address only
+ } else {
+ return r.RemoteAddr
+ }
+ }(),
+ "{port}": func() string {
+ if idx := strings.Index(r.Host, ":"); idx > -1 {
+ return r.Host[idx+1:] // port only
+ }
+ return ""
+ }(),
+ "{uri}": r.RequestURI,
+ "{time}": func() string {
+ return time.Now().Format(timeFormat)
+ }(),
+ "{status}": strconv.Itoa(rw.status),
+ "{size}": strconv.Itoa(rw.size),
+ }
+
+ // Header placeholders
+ for header, val := range r.Header {
+ rep[headerReplacer+header+"}"] = strings.Join(val, ",")
+ }
+
+ return rep
+}
+
+// replace performs a replacement of values on s and returns
+// the string with the replaced values.
+func (r replacer) replace(s string) string {
+ for placeholder, replacement := range r {
+ if replacement == "" {
+ replacement = emptyStringReplacer
+ }
+ s = strings.Replace(s, placeholder, replacement, -1)
+ }
+
+ // Replace any header placeholders that weren't found
+ for strings.Contains(s, headerReplacer) {
+ idxStart := strings.Index(s, headerReplacer)
+ endOffset := idxStart + len(headerReplacer)
+ idxEnd := strings.Index(s[endOffset:], "}")
+ if idxEnd > -1 {
+ s = s[:idxStart] + emptyStringReplacer + s[endOffset+idxEnd+1:]
+ } else {
+ break
+ }
+ }
+ return s
+}
+
+const (
+ timeFormat = "02/Jan/2006:15:04:05 -0700"
+ headerReplacer = "{>"
+ emptyStringReplacer = "-"
+)
diff --git a/server/server.go b/server/server.go
new file mode 100644
index 000000000..258f943a7
--- /dev/null
+++ b/server/server.go
@@ -0,0 +1,176 @@
+package server
+
+import (
+ "errors"
+ "log"
+ "net/http"
+ "os"
+
+ "github.com/mholt/caddy/config"
+ "github.com/mholt/caddy/middleware"
+)
+
+// servers maintains a registry of running servers.
+var servers = make(map[string]*Server)
+
+// Server represents an instance of a server, which serves
+// static content at a particular address (host and port).
+type Server struct {
+ config config.Config
+ reqlog *log.Logger
+ errlog *log.Logger
+ fileServer http.Handler
+ stack http.HandlerFunc
+}
+
+// New creates a new Server and registers it with the list
+// of servers created. Each server must have a unique host:port
+// combination. This function does not start serving.
+func New(conf config.Config) (*Server, error) {
+ addr := conf.Address()
+
+ // Unique address check
+ if _, exists := servers[addr]; exists {
+ return nil, errors.New("Address " + addr + " is already in use")
+ }
+
+ // Initialize
+ s := new(Server)
+ s.config = conf
+
+ // Register the server
+ servers[addr] = s
+
+ return s, nil
+}
+
+// Serve starts the server. It blocks until the server quits.
+func (s *Server) Serve() error {
+ err := s.configureStack()
+ if err != nil {
+ return err
+ }
+
+ if s.config.TLS.Enabled {
+ return http.ListenAndServeTLS(s.config.Address(), s.config.TLS.Certificate, s.config.TLS.Key, s)
+ } else {
+ return http.ListenAndServe(s.config.Address(), s)
+ }
+}
+
+// ServeHTTP is the entry point for each request to s.
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ s.stack(w, r)
+}
+
+// Log writes a message to the server's configured error log,
+// if there is one, or if there isn't, to the default stderr log.
+func (s *Server) Log(v ...interface{}) {
+ if s.errlog != nil {
+ s.errlog.Println(v...)
+ } else {
+ log.Println(v...)
+ }
+}
+
+// configureStack builds the server's middleware stack based
+// on its config. This method should be called last before
+// ListenAndServe begins.
+func (s *Server) configureStack() error {
+ var mid []middleware.Middleware
+ var err error
+ conf := s.config
+
+ // FileServer is the main application layer
+ s.fileServer = http.FileServer(http.Dir(conf.Root))
+
+ // push prepends each middleware to the stack so the
+ // compilation can iterate them in a natural, increasing order
+ push := func(m middleware.Middleware) {
+ mid = append(mid, nil)
+ copy(mid[1:], mid[0:])
+ mid[0] = m
+ }
+
+ // BEGIN ADDING MIDDLEWARE
+ // Middleware will be executed in the order they're added.
+
+ if conf.RequestLog.Enabled {
+ if conf.RequestLog.Enabled {
+ s.reqlog, err = enableLogging(conf.RequestLog)
+ if err != nil {
+ return err
+ }
+ }
+ push(middleware.RequestLog(s.reqlog, conf.RequestLog.Format))
+ }
+
+ if conf.ErrorLog.Enabled {
+ if conf.ErrorLog.Enabled {
+ s.errlog, err = enableLogging(conf.ErrorLog)
+ if err != nil {
+ return err
+ }
+ }
+ push(middleware.ErrorLog(s.errlog, conf.ErrorLog.Format))
+ }
+
+ if len(conf.Rewrites) > 0 {
+ push(middleware.Rewrite(conf.Rewrites))
+ }
+
+ if len(conf.Redirects) > 0 {
+ push(middleware.Redirect(conf.Redirects))
+ }
+
+ if len(conf.Extensions) > 0 {
+ push(middleware.Extensionless(conf.Root, conf.Extensions))
+ }
+
+ if len(conf.Headers) > 0 {
+ push(middleware.Headers(conf.Headers))
+ }
+
+ if conf.Gzip {
+ push(middleware.Gzip)
+ }
+
+ // END ADDING MIDDLEWARE
+
+ // Compiling the middleware wraps each layer around the next,
+ // fully configured, ready to serve every request.
+ s.compile(mid)
+
+ return nil
+}
+
+// compile is an elegant alternative to nesting middleware generator
+// function calls like handler1(handler2(handler3(finalHandler))).
+func (s *Server) compile(layers []middleware.Middleware) {
+ s.stack = s.fileServer.ServeHTTP // core app layer
+ for _, layer := range layers {
+ s.stack = layer(s.stack)
+ }
+}
+
+// enableLogging opens a log file and keeps it open for the lifetime
+// of the server. In fact, the log file is never closed as long as
+// the program is running, since the server will be running for
+// that long. If that ever changes, the log file should be closed.
+func enableLogging(l config.Log) (*log.Logger, error) {
+ var file *os.File
+ var err error
+
+ if l.OutputFile == "stdout" {
+ file = os.Stdout
+ } else if l.OutputFile == "stderr" {
+ file = os.Stderr
+ } else {
+ file, err = os.OpenFile(l.OutputFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return log.New(file, "", 0), nil
+}
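
Because push prepends to the slice and compile wraps from index 0 upward, the middleware added first in configureStack ends up as the outermost layer, which is what the "executed in the order they're added" comment promises. With every feature enabled, the compiled stack is equivalent to:

    RequestLog(ErrorLog(Rewrite(Redirect(Extensionless(Headers(Gzip(fileServer)))))))

so the request log sees each request first and gzip compression wraps the response closest to the file server.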