author    | Bjørn Erik Pedersen <[email protected]> | 2023-06-16 08:17:42 +0200
committer | Bjørn Erik Pedersen <[email protected]> | 2023-06-18 13:03:04 +0200
commit    | 7c9fada778e91976d4ba1cbe942235a9bbeaf5cb (patch)
tree      | a717f6e0a5915777ae6859564acd13385213bbab /deploy/deploy.go
parent    | 0e7944658660b5658b7640dce3cb346d7198d8c9 (diff)
Replace the old log setup with structured logging etc.
Fixes #11124
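For orientation, here is a minimal caller-side sketch of what the new wiring looks like after this commit. It is illustrative, not code from this patch: the helper name deploySite and the cfg/fs plumbing are hypothetical, and only New, loggers.NewDefault and Deploy are taken from the diff below.

package deployexample

import (
	"context"

	"github.com/gohugoio/hugo/common/loggers"
	"github.com/gohugoio/hugo/config"
	"github.com/gohugoio/hugo/deploy"
	"github.com/spf13/afero"
)

// deploySite is a hypothetical helper; cfg and fs would be provided by the
// surrounding hugo command setup.
func deploySite(cfg config.AllProvider, fs afero.Fs) error {
	// New now takes the logger explicitly instead of the deployer writing to
	// the package-level jww singleton.
	logger := loggers.NewDefault()
	deployer, err := deploy.New(cfg, logger, fs)
	if err != nil {
		return err
	}
	// Deploy falls back to loggers.NewDefault() when no logger was injected.
	return deployer.Deploy(context.Background())
}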
Diffstat (limited to 'deploy/deploy.go')
-rw-r--r-- | deploy/deploy.go | 81
1 file changed, 43 insertions, 38 deletions
diff --git a/deploy/deploy.go b/deploy/deploy.go
index db88996a9..60a3da363 100644
--- a/deploy/deploy.go
+++ b/deploy/deploy.go
@@ -37,10 +37,10 @@ import (
 	"github.com/dustin/go-humanize"
 	"github.com/gobwas/glob"
+	"github.com/gohugoio/hugo/common/loggers"
 	"github.com/gohugoio/hugo/config"
 	"github.com/gohugoio/hugo/media"
 	"github.com/spf13/afero"
-	jww "github.com/spf13/jwalterweatherman"
 	"golang.org/x/text/unicode/norm"

 	"gocloud.dev/blob"
@@ -56,9 +56,10 @@ type Deployer struct {
 	bucket *blob.Bucket

 	mediaTypes media.Types // Hugo's MediaType to guess ContentType
-	quiet      bool        // true reduces STDOUT
+	quiet      bool        // true reduces STDOUT // TODO(bep) remove, this is a global feature.

-	cfg DeployConfig
+	cfg    DeployConfig
+	logger loggers.Logger

 	target *Target // the target to deploy to
@@ -73,7 +74,7 @@ type deploySummary struct {
 const metaMD5Hash = "md5chksum" // the meta key to store md5hash in

 // New constructs a new *Deployer.
-func New(cfg config.AllProvider, localFs afero.Fs) (*Deployer, error) {
+func New(cfg config.AllProvider, logger loggers.Logger, localFs afero.Fs) (*Deployer, error) {
 	dcfg := cfg.GetConfigSection(deploymentConfigKey).(DeployConfig)
 	targetName := dcfg.Target
@@ -112,12 +113,16 @@ func (d *Deployer) openBucket(ctx context.Context) (*blob.Bucket, error) {
 	if d.bucket != nil {
 		return d.bucket, nil
 	}
-	jww.FEEDBACK.Printf("Deploying to target %q (%s)\n", d.target.Name, d.target.URL)
+	d.logger.Printf("Deploying to target %q (%s)\n", d.target.Name, d.target.URL)
 	return blob.OpenBucket(ctx, d.target.URL)
 }

 // Deploy deploys the site to a target.
 func (d *Deployer) Deploy(ctx context.Context) error {
+	if d.logger == nil {
+		d.logger = loggers.NewDefault()
+	}
+
 	bucket, err := d.openBucket(ctx)
 	if err != nil {
 		return err
@@ -132,33 +137,33 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 	if d.target != nil {
 		include, exclude = d.target.includeGlob, d.target.excludeGlob
 	}
-	local, err := walkLocal(d.localFs, d.cfg.Matchers, include, exclude, d.mediaTypes)
+	local, err := d.walkLocal(d.localFs, d.cfg.Matchers, include, exclude, d.mediaTypes)
 	if err != nil {
 		return err
 	}
-	jww.INFO.Printf("Found %d local files.\n", len(local))
+	d.logger.Infof("Found %d local files.\n", len(local))
 	d.summary.NumLocal = len(local)

 	// Load remote files from the target.
-	remote, err := walkRemote(ctx, bucket, include, exclude)
+	remote, err := d.walkRemote(ctx, bucket, include, exclude)
 	if err != nil {
 		return err
 	}
-	jww.INFO.Printf("Found %d remote files.\n", len(remote))
+	d.logger.Infof("Found %d remote files.\n", len(remote))
 	d.summary.NumRemote = len(remote)

 	// Diff local vs remote to see what changes need to be applied.
-	uploads, deletes := findDiffs(local, remote, d.cfg.Force)
+	uploads, deletes := d.findDiffs(local, remote, d.cfg.Force)
 	d.summary.NumUploads = len(uploads)
 	d.summary.NumDeletes = len(deletes)
 	if len(uploads)+len(deletes) == 0 {
 		if !d.quiet {
-			jww.FEEDBACK.Println("No changes required.")
+			d.logger.Println("No changes required.")
 		}
 		return nil
 	}
 	if !d.quiet {
-		jww.FEEDBACK.Println(summarizeChanges(uploads, deletes))
+		d.logger.Println(summarizeChanges(uploads, deletes))
 	}

 	// Ask for confirmation before proceeding.
@@ -192,14 +197,14 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 	for _, upload := range uploads {
 		if d.cfg.DryRun {
 			if !d.quiet {
-				jww.FEEDBACK.Printf("[DRY RUN] Would upload: %v\n", upload)
+				d.logger.Printf("[DRY RUN] Would upload: %v\n", upload)
 			}
 			continue
 		}
 		sem <- struct{}{}
 		go func(upload *fileToUpload) {
-			if err := doSingleUpload(ctx, bucket, upload); err != nil {
+			if err := d.doSingleUpload(ctx, bucket, upload); err != nil {
 				errMu.Lock()
 				defer errMu.Unlock()
 				errs = append(errs, err)
@@ -214,7 +219,7 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 	}

 	if d.cfg.MaxDeletes != -1 && len(deletes) > d.cfg.MaxDeletes {
-		jww.WARN.Printf("Skipping %d deletes because it is more than --maxDeletes (%d). If this is expected, set --maxDeletes to a larger number, or -1 to disable this check.\n", len(deletes), d.cfg.MaxDeletes)
+		d.logger.Warnf("Skipping %d deletes because it is more than --maxDeletes (%d). If this is expected, set --maxDeletes to a larger number, or -1 to disable this check.\n", len(deletes), d.cfg.MaxDeletes)
 		d.summary.NumDeletes = 0
 	} else {
 		// Apply deletes in parallel.
@@ -223,16 +228,16 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 		for _, del := range deletes {
 			if d.cfg.DryRun {
 				if !d.quiet {
-					jww.FEEDBACK.Printf("[DRY RUN] Would delete %s\n", del)
+					d.logger.Printf("[DRY RUN] Would delete %s\n", del)
 				}
 				continue
 			}
 			sem <- struct{}{}
 			go func(del string) {
-				jww.INFO.Printf("Deleting %s...\n", del)
+				d.logger.Infof("Deleting %s...\n", del)
 				if err := bucket.Delete(ctx, del); err != nil {
 					if gcerrors.Code(err) == gcerrors.NotFound {
-						jww.WARN.Printf("Failed to delete %q because it wasn't found: %v", del, err)
+						d.logger.Warnf("Failed to delete %q because it wasn't found: %v", del, err)
 					} else {
 						errMu.Lock()
 						defer errMu.Unlock()
@@ -250,24 +255,24 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 	if len(errs) > 0 {
 		if !d.quiet {
-			jww.FEEDBACK.Printf("Encountered %d errors.\n", len(errs))
+			d.logger.Printf("Encountered %d errors.\n", len(errs))
 		}
 		return errs[0]
 	}
 	if !d.quiet {
-		jww.FEEDBACK.Println("Success!")
+		d.logger.Println("Success!")
 	}

 	if d.cfg.InvalidateCDN {
 		if d.target.CloudFrontDistributionID != "" {
 			if d.cfg.DryRun {
 				if !d.quiet {
-					jww.FEEDBACK.Printf("[DRY RUN] Would invalidate CloudFront CDN with ID %s\n", d.target.CloudFrontDistributionID)
+					d.logger.Printf("[DRY RUN] Would invalidate CloudFront CDN with ID %s\n", d.target.CloudFrontDistributionID)
 				}
 			} else {
-				jww.FEEDBACK.Println("Invalidating CloudFront CDN...")
+				d.logger.Println("Invalidating CloudFront CDN...")
 				if err := InvalidateCloudFront(ctx, d.target.CloudFrontDistributionID); err != nil {
-					jww.FEEDBACK.Printf("Failed to invalidate CloudFront CDN: %v\n", err)
+					d.logger.Printf("Failed to invalidate CloudFront CDN: %v\n", err)
 					return err
 				}
 			}
@@ -275,17 +280,17 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 		if d.target.GoogleCloudCDNOrigin != "" {
 			if d.cfg.DryRun {
 				if !d.quiet {
-					jww.FEEDBACK.Printf("[DRY RUN] Would invalidate Google Cloud CDN with origin %s\n", d.target.GoogleCloudCDNOrigin)
+					d.logger.Printf("[DRY RUN] Would invalidate Google Cloud CDN with origin %s\n", d.target.GoogleCloudCDNOrigin)
 				}
 			} else {
-				jww.FEEDBACK.Println("Invalidating Google Cloud CDN...")
+				d.logger.Println("Invalidating Google Cloud CDN...")
 				if err := InvalidateGoogleCloudCDN(ctx, d.target.GoogleCloudCDNOrigin); err != nil {
-					jww.FEEDBACK.Printf("Failed to invalidate Google Cloud CDN: %v\n", err)
+					d.logger.Printf("Failed to invalidate Google Cloud CDN: %v\n", err)
 					return err
 				}
 			}
 		}
-		jww.FEEDBACK.Println("Success!")
+		d.logger.Println("Success!")
 	}
 	return nil
 }
@@ -300,8 +305,8 @@ func summarizeChanges(uploads []*fileToUpload, deletes []string) string {
 }

 // doSingleUpload executes a single file upload.
-func doSingleUpload(ctx context.Context, bucket *blob.Bucket, upload *fileToUpload) error {
-	jww.INFO.Printf("Uploading %v...\n", upload)
+func (d *Deployer) doSingleUpload(ctx context.Context, bucket *blob.Bucket, upload *fileToUpload) error {
+	d.logger.Infof("Uploading %v...\n", upload)
 	opts := &blob.WriterOptions{
 		CacheControl:    upload.Local.CacheControl(),
 		ContentEncoding: upload.Local.ContentEncoding(),
@@ -479,7 +484,7 @@ func knownHiddenDirectory(name string) bool {

 // walkLocal walks the source directory and returns a flat list of files,
 // using localFile.SlashPath as the map keys.
-func walkLocal(fs afero.Fs, matchers []*Matcher, include, exclude glob.Glob, mediaTypes media.Types) (map[string]*localFile, error) {
+func (d *Deployer) walkLocal(fs afero.Fs, matchers []*Matcher, include, exclude glob.Glob, mediaTypes media.Types) (map[string]*localFile, error) {
 	retval := map[string]*localFile{}
 	err := afero.Walk(fs, "", func(path string, info os.FileInfo, err error) error {
 		if err != nil {
@@ -509,11 +514,11 @@ func walkLocal(fs afero.Fs, matchers []*Matcher, include, exclude glob.Glob, med
 		// Check include/exclude matchers.
 		slashpath := filepath.ToSlash(path)
 		if include != nil && !include.Match(slashpath) {
-			jww.INFO.Printf("  dropping %q due to include\n", slashpath)
+			d.logger.Infof("  dropping %q due to include\n", slashpath)
 			return nil
 		}
 		if exclude != nil && exclude.Match(slashpath) {
-			jww.INFO.Printf("  dropping %q due to exclude\n", slashpath)
+			d.logger.Infof("  dropping %q due to exclude\n", slashpath)
 			return nil
 		}
@@ -539,7 +544,7 @@ func walkLocal(fs afero.Fs, matchers []*Matcher, include, exclude glob.Glob, med
 }

 // walkRemote walks the target bucket and returns a flat list.
-func walkRemote(ctx context.Context, bucket *blob.Bucket, include, exclude glob.Glob) (map[string]*blob.ListObject, error) {
+func (d *Deployer) walkRemote(ctx context.Context, bucket *blob.Bucket, include, exclude glob.Glob) (map[string]*blob.ListObject, error) {
 	retval := map[string]*blob.ListObject{}
 	iter := bucket.List(nil)
 	for {
@@ -552,11 +557,11 @@ func walkRemote(ctx context.Context, bucket *blob.Bucket, include, exclude glob.
 		}
 		// Check include/exclude matchers.
 		if include != nil && !include.Match(obj.Key) {
-			jww.INFO.Printf("  remote dropping %q due to include\n", obj.Key)
+			d.logger.Infof("  remote dropping %q due to include\n", obj.Key)
 			continue
 		}
 		if exclude != nil && exclude.Match(obj.Key) {
-			jww.INFO.Printf("  remote dropping %q due to exclude\n", obj.Key)
+			d.logger.Infof("  remote dropping %q due to exclude\n", obj.Key)
 			continue
 		}
 		// If the remote didn't give us an MD5, use remote attributes MD5, if that doesn't exist compute one.
@@ -629,7 +634,7 @@ func (u *fileToUpload) String() string {
 // findDiffs diffs localFiles vs remoteFiles to see what changes should be
 // applied to the remote target. It returns a slice of *fileToUpload and a
 // slice of paths for files to delete.
-func findDiffs(localFiles map[string]*localFile, remoteFiles map[string]*blob.ListObject, force bool) ([]*fileToUpload, []string) {
+func (d *Deployer) findDiffs(localFiles map[string]*localFile, remoteFiles map[string]*blob.ListObject, force bool) ([]*fileToUpload, []string) {
 	var uploads []*fileToUpload
 	var deletes []string
@@ -680,10 +685,10 @@ func findDiffs(localFiles map[string]*localFile, remoteFiles map[string]*blob.Li
 			reason = reasonNotFound
 		}
 		if upload {
-			jww.DEBUG.Printf("%s needs to be uploaded: %v\n", path, reason)
+			d.logger.Debugf("%s needs to be uploaded: %v\n", path, reason)
 			uploads = append(uploads, &fileToUpload{lf, reason})
 		} else {
-			jww.DEBUG.Printf("%s exists at target and does not need to be uploaded", path)
+			d.logger.Debugf("%s exists at target and does not need to be uploaded", path)
 		}
 	}