aboutsummaryrefslogtreecommitdiffhomepage
path: root/deploy
diff options
context:
space:
mode:
authorRobert van Gent <[email protected]>2019-05-03 09:30:46 -0700
committerBjørn Erik Pedersen <[email protected]>2019-05-06 22:59:07 +0200
commit527cf1ab03fe4746885e90a197bc25decad88f89 (patch)
treed16788f0ccd902e739acc75884521c869266ceb4 /deploy
parentf4956d9aae69b1cb5715114cf5242fd80a9cabc7 (diff)
downloadhugo-527cf1ab03fe4746885e90a197bc25decad88f89.tar.gz
hugo-527cf1ab03fe4746885e90a197bc25decad88f89.zip
deploy: Support configuration of upload order
Diffstat (limited to 'deploy')
-rw-r--r--deploy/deploy.go112
-rw-r--r--deploy/deployConfig.go10
-rw-r--r--deploy/deployConfig_test.go33
-rw-r--r--deploy/deploy_test.go59
4 files changed, 182 insertions, 32 deletions
diff --git a/deploy/deploy.go b/deploy/deploy.go
index 0cea4a9e3..dadff7d40 100644
--- a/deploy/deploy.go
+++ b/deploy/deploy.go
@@ -23,7 +23,9 @@ import (
"mime"
"os"
"path/filepath"
+ "regexp"
"runtime"
+ "sort"
"strings"
"sync"
@@ -45,14 +47,15 @@ import (
type Deployer struct {
localFs afero.Fs
- target *target // the target to deploy to
- matchers []*matcher // matchers to apply to uploaded files
- quiet bool // true reduces STDOUT
- confirm bool // true enables confirmation before making changes
- dryRun bool // true skips conformations and prints changes instead of applying them
- force bool // true forces upload of all files
- invalidateCDN bool // true enables invalidate CDN cache (if possible)
- maxDeletes int // caps the # of files to delete; -1 to disable
+ target *target // the target to deploy to
+ matchers []*matcher // matchers to apply to uploaded files
+ ordering []*regexp.Regexp // orders uploads
+ quiet bool // true reduces STDOUT
+ confirm bool // true enables confirmation before making changes
+ dryRun bool // true skips confirmations and prints changes instead of applying them
+ force bool // true forces upload of all files
+ invalidateCDN bool // true enables invalidate CDN cache (if possible)
+ maxDeletes int // caps the # of files to delete; -1 to disable
}
// New constructs a new *Deployer.
@@ -79,6 +82,7 @@ func New(cfg config.Provider, localFs afero.Fs) (*Deployer, error) {
localFs: localFs,
target: tgt,
matchers: dcfg.Matchers,
+ ordering: dcfg.ordering,
quiet: cfg.GetBool("quiet"),
confirm: cfg.GetBool("confirm"),
dryRun: cfg.GetBool("dryRun"),
@@ -138,40 +142,55 @@ func (d *Deployer) Deploy(ctx context.Context) error {
}
}
+ // Order the uploads. They are organized in groups; all uploads in a group
+ // must be complete before moving on to the next group.
+ uploadGroups := applyOrdering(d.ordering, uploads)
+
// Apply the changes in parallel, using an inverted worker
// pool (https://www.youtube.com/watch?v=5zXAHh5tJqQ&t=26m58s).
// sem prevents more than nParallel concurrent goroutines.
const nParallel = 10
- sem := make(chan struct{}, nParallel)
var errs []error
var errMu sync.Mutex // protects errs
- for _, upload := range uploads {
- if d.dryRun {
- if !d.quiet {
- jww.FEEDBACK.Printf("[DRY RUN] Would upload: %v\n", upload)
- }
+ for _, uploads := range uploadGroups {
+ // Short-circuit for an empty group.
+ if len(uploads) == 0 {
continue
}
- // TODO: Add a progress indicator, as this can take a while
- // depending on the number of files, upload speed, and size of the
- // site.
-
- sem <- struct{}{}
- go func(upload *fileToUpload) {
- if err := doSingleUpload(ctx, bucket, upload); err != nil {
- errMu.Lock()
- defer errMu.Unlock()
- errs = append(errs, err)
+ // Within the group, apply uploads in parallel.
+ sem := make(chan struct{}, nParallel)
+ for _, upload := range uploads {
+ if d.dryRun {
+ if !d.quiet {
+ jww.FEEDBACK.Printf("[DRY RUN] Would upload: %v\n", upload)
+ }
+ continue
}
- <-sem
- }(upload)
+
+ sem <- struct{}{}
+ go func(upload *fileToUpload) {
+ if err := doSingleUpload(ctx, bucket, upload); err != nil {
+ errMu.Lock()
+ defer errMu.Unlock()
+ errs = append(errs, err)
+ }
+ <-sem
+ }(upload)
+ }
+ // Wait for all uploads in the group to finish.
+ for n := nParallel; n > 0; n-- {
+ sem <- struct{}{}
+ }
}
if d.maxDeletes != -1 && len(deletes) > d.maxDeletes {
jww.WARN.Printf("Skipping %d deletes because it is more than --maxDeletes (%d). If this is expected, set --maxDeletes to a larger number, or -1 to disable this check.\n", len(deletes), d.maxDeletes)
} else {
+ // Apply deletes in parallel.
+ sort.Slice(deletes, func(i, j int) bool { return deletes[i] < deletes[j] })
+ sem := make(chan struct{}, nParallel)
for _, del := range deletes {
if d.dryRun {
if !d.quiet {
@@ -190,10 +209,10 @@ func (d *Deployer) Deploy(ctx context.Context) error {
<-sem
}(del)
}
- }
- // Wait for all uploads/deletes to finish.
- for n := nParallel; n > 0; n-- {
- sem <- struct{}{}
+ // Wait for all deletes to finish.
+ for n := nParallel; n > 0; n-- {
+ sem <- struct{}{}
+ }
}
if len(errs) > 0 {
if !d.quiet {
@@ -551,3 +570,36 @@ func findDiffs(localFiles map[string]*localFile, remoteFiles map[string]*blob.Li
}
return uploads, deletes
}
+
+// applyOrdering returns an ordered slice of slices of uploads.
+//
+// The returned slice will have length len(ordering)+1.
+//
+// The subslice at index i, for i = 0 ... len(ordering)-1, will have all of the
+// uploads whose Local.Path matched the regex at ordering[i] (but not any
+// previous ordering regex).
+// The subslice at index len(ordering) will have the remaining uploads that
+// didn't match any ordering regex.
+//
+// The subslices are sorted by Local.Path.
+func applyOrdering(ordering []*regexp.Regexp, uploads []*fileToUpload) [][]*fileToUpload {
+
+ // Sort the whole slice by Local.Path first.
+ sort.Slice(uploads, func(i, j int) bool { return uploads[i].Local.Path < uploads[j].Local.Path })
+
+ retval := make([][]*fileToUpload, len(ordering)+1)
+ for _, u := range uploads {
+ matched := false
+ for i, re := range ordering {
+ if re.MatchString(u.Local.Path) {
+ retval[i] = append(retval[i], u)
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ retval[len(ordering)] = append(retval[len(ordering)], u)
+ }
+ }
+ return retval
+}
diff --git a/deploy/deployConfig.go b/deploy/deployConfig.go
index 066fa0ef8..3cfa27013 100644
--- a/deploy/deployConfig.go
+++ b/deploy/deployConfig.go
@@ -27,6 +27,9 @@ const deploymentConfigKey = "deployment"
type deployConfig struct {
Targets []*target
Matchers []*matcher
+ Order []string
+
+ ordering []*regexp.Regexp // compiled Order
}
type target struct {
@@ -86,5 +89,12 @@ func decodeConfig(cfg config.Provider) (deployConfig, error) {
return dcfg, fmt.Errorf("invalid deployment.matchers.pattern: %v", err)
}
}
+ for _, o := range dcfg.Order {
+ re, err := regexp.Compile(o)
+ if err != nil {
+ return dcfg, fmt.Errorf("invalid deployment.orderings.pattern: %v", err)
+ }
+ dcfg.ordering = append(dcfg.ordering, re)
+ }
return dcfg, nil
}
diff --git a/deploy/deployConfig_test.go b/deploy/deployConfig_test.go
index 3e29d8edf..3f849d89c 100644
--- a/deploy/deployConfig_test.go
+++ b/deploy/deployConfig_test.go
@@ -29,6 +29,9 @@ func TestDecodeConfigFromTOML(t *testing.T) {
someOtherValue = "foo"
[deployment]
+
+order = ["o1", "o2"]
+
[[deployment.targets]]
Name = "name1"
URL = "url1"
@@ -59,6 +62,11 @@ content-type = "contenttype2"
dcfg, err := decodeConfig(cfg)
assert.NoError(err)
+ assert.Equal(2, len(dcfg.Order))
+ assert.Equal("o1", dcfg.Order[0])
+ assert.Equal("o2", dcfg.Order[1])
+ assert.Equal(2, len(dcfg.ordering))
+
assert.Equal(2, len(dcfg.Targets))
assert.Equal("name1", dcfg.Targets[0].Name)
assert.Equal("url1", dcfg.Targets[0].URL)
@@ -69,11 +77,36 @@ content-type = "contenttype2"
assert.Equal(2, len(dcfg.Matchers))
assert.Equal("^pattern1$", dcfg.Matchers[0].Pattern)
+ assert.NotNil(dcfg.Matchers[0].re)
assert.Equal("cachecontrol1", dcfg.Matchers[0].CacheControl)
assert.Equal("contentencoding1", dcfg.Matchers[0].ContentEncoding)
assert.Equal("contenttype1", dcfg.Matchers[0].ContentType)
assert.True(dcfg.Matchers[0].Gzip)
assert.True(dcfg.Matchers[0].Force)
+ assert.Equal("^pattern2$", dcfg.Matchers[1].Pattern)
+ assert.NotNil(dcfg.Matchers[1].re)
+ assert.Equal("cachecontrol2", dcfg.Matchers[1].CacheControl)
+ assert.Equal("contentencoding2", dcfg.Matchers[1].ContentEncoding)
+ assert.Equal("contenttype2", dcfg.Matchers[1].ContentType)
+ assert.False(dcfg.Matchers[1].Gzip)
+ assert.False(dcfg.Matchers[1].Force)
+}
+
+func TestInvalidOrderingPattern(t *testing.T) {
+ assert := require.New(t)
+
+ tomlConfig := `
+
+someOtherValue = "foo"
+
+[deployment]
+order = ["["] # invalid regular expression
+`
+ cfg, err := config.FromConfigString(tomlConfig, "toml")
+ assert.NoError(err)
+
+ _, err = decodeConfig(cfg)
+ assert.Error(err)
}
func TestInvalidMatcherPattern(t *testing.T) {
diff --git a/deploy/deploy_test.go b/deploy/deploy_test.go
index 1c6afb2e9..519a3963f 100644
--- a/deploy/deploy_test.go
+++ b/deploy/deploy_test.go
@@ -19,6 +19,7 @@ import (
"crypto/md5"
"io/ioutil"
"os"
+ "regexp"
"sort"
"testing"
@@ -174,11 +175,10 @@ func TestDeploy_FindDiffs(t *testing.T) {
remote[r.Key] = r
}
gotUpdates, gotDeletes := findDiffs(local, remote, tc.Force)
- sort.Slice(gotUpdates, func(i, j int) bool { return gotUpdates[i].Local.Path < gotUpdates[j].Local.Path })
+ gotUpdates = applyOrdering(nil, gotUpdates)[0]
sort.Slice(gotDeletes, func(i, j int) bool { return gotDeletes[i] < gotDeletes[j] })
if diff := cmp.Diff(gotUpdates, tc.WantUpdates, cmpopts.IgnoreUnexported(localFile{})); diff != "" {
t.Errorf("updates differ:\n%s", diff)
-
}
if diff := cmp.Diff(gotDeletes, tc.WantDeletes); diff != "" {
t.Errorf("deletes differ:\n%s", diff)
@@ -306,3 +306,58 @@ func TestDeploy_LocalFile(t *testing.T) {
})
}
}
+
+func TestOrdering(t *testing.T) {
+ tests := []struct {
+ Description string
+ Uploads []string
+ Ordering []*regexp.Regexp
+ Want [][]string
+ }{
+ {
+ Description: "empty",
+ Want: [][]string{nil},
+ },
+ {
+ Description: "no ordering",
+ Uploads: []string{"c", "b", "a", "d"},
+ Want: [][]string{{"a", "b", "c", "d"}},
+ },
+ {
+ Description: "one ordering",
+ Uploads: []string{"db", "c", "b", "a", "da"},
+ Ordering: []*regexp.Regexp{regexp.MustCompile("^d")},
+ Want: [][]string{{"da", "db"}, {"a", "b", "c"}},
+ },
+ {
+ Description: "two orderings",
+ Uploads: []string{"db", "c", "b", "a", "da"},
+ Ordering: []*regexp.Regexp{
+ regexp.MustCompile("^d"),
+ regexp.MustCompile("^b"),
+ },
+ Want: [][]string{{"da", "db"}, {"b"}, {"a", "c"}},
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.Description, func(t *testing.T) {
+ uploads := make([]*fileToUpload, len(tc.Uploads))
+ for i, u := range tc.Uploads {
+ uploads[i] = &fileToUpload{Local: &localFile{Path: u}}
+ }
+ gotUploads := applyOrdering(tc.Ordering, uploads)
+ var got [][]string
+ for _, subslice := range gotUploads {
+ var gotsubslice []string
+ for _, u := range subslice {
+ gotsubslice = append(gotsubslice, u.Local.Path)
+ }
+ got = append(got, gotsubslice)
+ }
+ if diff := cmp.Diff(got, tc.Want); diff != "" {
+ t.Error(diff)
+ }
+ })
+ }
+}