Create a struct with all of Hugo's config options

The primary motivation is documentation, but it will hopefully also simplify the code.
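
As a rough sketch of the pattern (trimmed field set; `decodeDeployConfig` is a hypothetical helper, not Hugo's actual API): a raw config section is weak-decoded into a typed struct once, defaults are applied there, and callers read typed fields instead of doing ad-hoc `cfg.GetString`/`cfg.GetBool` lookups.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// DeployConfig here is a trimmed-down stand-in for the typed deployment
// config introduced in the diff below.
type DeployConfig struct {
	Target     string
	DryRun     bool
	MaxDeletes int
	Workers    int
}

// decodeDeployConfig is a hypothetical helper mirroring the DecodeConfig
// pattern below: weak-decode the raw section map into the struct, then
// apply defaults in one place.
func decodeDeployConfig(raw map[string]any) (DeployConfig, error) {
	var dcfg DeployConfig
	if err := mapstructure.WeakDecode(raw, &dcfg); err != nil {
		return dcfg, err
	}
	if dcfg.Workers <= 0 {
		dcfg.Workers = 10 // same default as in the diff below
	}
	return dcfg, nil
}

func main() {
	raw := map[string]any{"target": "production", "maxDeletes": "256"}
	dcfg, err := decodeDeployConfig(raw)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dcfg) // {Target:production DryRun:false MaxDeletes:256 Workers:10}
}
```

The `Deployer` changes below then read `d.cfg.DryRun`, `d.cfg.MaxDeletes` and so on from one `DeployConfig` value instead of making separate `cfg.GetBool`/`cfg.GetInt` calls.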

Also,

* Lower-case the default output format names; this is in line with the custom ones (map keys) and how
they are treated everywhere else. This avoids doing `strings.EqualFold` all over the place (see the sketch below).
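
A minimal illustration of that point, using a hypothetical format table rather than Hugo's actual output-format types: with keys normalized to lower case up front, a plain map lookup replaces a case-insensitive scan.

```go
package main

import (
	"fmt"
	"strings"
)

// formats is a hypothetical lookup table keyed by lower-cased format names.
var formats = map[string]string{
	"html": "text/html",
	"json": "application/json",
}

// lookupFold scans the table with a case-insensitive comparison on every call,
// the kind of code the lower-casing makes unnecessary.
func lookupFold(name string) (string, bool) {
	for k, v := range formats {
		if strings.EqualFold(k, name) {
			return v, true
		}
	}
	return "", false
}

// lookupLower normalizes the key once and uses a direct map lookup.
func lookupLower(name string) (string, bool) {
	v, ok := formats[strings.ToLower(name)]
	return v, ok
}

func main() {
	fmt.Println(lookupFold("HTML"))  // text/html true
	fmt.Println(lookupLower("HTML")) // text/html true
}
```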

Closes #10896
Closes #10620
Bjørn Erik Pedersen 2023-01-04 18:24:36 +01:00
parent 6aededf6b4
commit 241b21b0fd
337 changed files with 13377 additions and 14898 deletions


@@ -55,17 +55,12 @@ type Deployer struct {
localFs afero.Fs
bucket *blob.Bucket
target *target // the target to deploy to
matchers []*matcher // matchers to apply to uploaded files
mediaTypes media.Types // Hugo's MediaType to guess ContentType
ordering []*regexp.Regexp // orders uploads
quiet bool // true reduces STDOUT
confirm bool // true enables confirmation before making changes
dryRun bool // true skips conformations and prints changes instead of applying them
force bool // true forces upload of all files
invalidateCDN bool // true enables invalidate CDN cache (if possible)
maxDeletes int // caps the # of files to delete; -1 to disable
workers int // The number of workers to transfer files
mediaTypes media.Types // Hugo's MediaType to guess ContentType
quiet bool // true reduces STDOUT
cfg DeployConfig
target *Target // the target to deploy to
// For tests...
summary deploySummary // summary of latest Deploy results
@@ -78,21 +73,18 @@ type deploySummary struct {
const metaMD5Hash = "md5chksum" // the meta key to store md5hash in
// New constructs a new *Deployer.
func New(cfg config.Provider, localFs afero.Fs) (*Deployer, error) {
targetName := cfg.GetString("target")
func New(cfg config.AllProvider, localFs afero.Fs) (*Deployer, error) {
// Load the [deployment] section of the config.
dcfg, err := decodeConfig(cfg)
if err != nil {
return nil, err
}
dcfg := cfg.GetConfigSection(deploymentConfigKey).(DeployConfig)
targetName := dcfg.Target
if len(dcfg.Targets) == 0 {
return nil, errors.New("no deployment targets found")
}
mediaTypes := cfg.GetConfigSection("mediaTypes").(media.Types)
// Find the target to deploy to.
var tgt *target
var tgt *Target
if targetName == "" {
// Default to the first target.
tgt = dcfg.Targets[0]
@@ -108,18 +100,11 @@ func New(cfg config.Provider, localFs afero.Fs) (*Deployer, error) {
}
return &Deployer{
localFs: localFs,
target: tgt,
matchers: dcfg.Matchers,
ordering: dcfg.ordering,
mediaTypes: dcfg.mediaTypes,
quiet: cfg.GetBool("quiet"),
confirm: cfg.GetBool("confirm"),
dryRun: cfg.GetBool("dryRun"),
force: cfg.GetBool("force"),
invalidateCDN: cfg.GetBool("invalidateCDN"),
maxDeletes: cfg.GetInt("maxDeletes"),
workers: cfg.GetInt("workers"),
localFs: localFs,
target: tgt,
quiet: cfg.BuildExpired(),
mediaTypes: mediaTypes,
cfg: dcfg,
}, nil
}
@@ -138,12 +123,16 @@ func (d *Deployer) Deploy(ctx context.Context) error {
return err
}
if d.cfg.Workers <= 0 {
d.cfg.Workers = 10
}
// Load local files from the source directory.
var include, exclude glob.Glob
if d.target != nil {
include, exclude = d.target.includeGlob, d.target.excludeGlob
}
local, err := walkLocal(d.localFs, d.matchers, include, exclude, d.mediaTypes)
local, err := walkLocal(d.localFs, d.cfg.Matchers, include, exclude, d.mediaTypes)
if err != nil {
return err
}
@@ -159,7 +148,7 @@ func (d *Deployer) Deploy(ctx context.Context) error {
d.summary.NumRemote = len(remote)
// Diff local vs remote to see what changes need to be applied.
uploads, deletes := findDiffs(local, remote, d.force)
uploads, deletes := findDiffs(local, remote, d.cfg.Force)
d.summary.NumUploads = len(uploads)
d.summary.NumDeletes = len(deletes)
if len(uploads)+len(deletes) == 0 {
@@ -173,7 +162,7 @@ func (d *Deployer) Deploy(ctx context.Context) error {
}
// Ask for confirmation before proceeding.
if d.confirm && !d.dryRun {
if d.cfg.Confirm && !d.cfg.DryRun {
fmt.Printf("Continue? (Y/n) ")
var confirm string
if _, err := fmt.Scanln(&confirm); err != nil {
@@ -186,15 +175,9 @@ func (d *Deployer) Deploy(ctx context.Context) error {
// Order the uploads. They are organized in groups; all uploads in a group
// must be complete before moving on to the next group.
uploadGroups := applyOrdering(d.ordering, uploads)
uploadGroups := applyOrdering(d.cfg.ordering, uploads)
// Apply the changes in parallel, using an inverted worker
// pool (https://www.youtube.com/watch?v=5zXAHh5tJqQ&t=26m58s).
// sem prevents more than nParallel concurrent goroutines.
if d.workers <= 0 {
d.workers = 10
}
nParallel := d.workers
nParallel := d.cfg.Workers
var errs []error
var errMu sync.Mutex // protects errs
@@ -207,7 +190,7 @@ func (d *Deployer) Deploy(ctx context.Context) error {
// Within the group, apply uploads in parallel.
sem := make(chan struct{}, nParallel)
for _, upload := range uploads {
if d.dryRun {
if d.cfg.DryRun {
if !d.quiet {
jww.FEEDBACK.Printf("[DRY RUN] Would upload: %v\n", upload)
}
@@ -230,15 +213,15 @@ func (d *Deployer) Deploy(ctx context.Context) error {
}
}
if d.maxDeletes != -1 && len(deletes) > d.maxDeletes {
jww.WARN.Printf("Skipping %d deletes because it is more than --maxDeletes (%d). If this is expected, set --maxDeletes to a larger number, or -1 to disable this check.\n", len(deletes), d.maxDeletes)
if d.cfg.MaxDeletes != -1 && len(deletes) > d.cfg.MaxDeletes {
jww.WARN.Printf("Skipping %d deletes because it is more than --maxDeletes (%d). If this is expected, set --maxDeletes to a larger number, or -1 to disable this check.\n", len(deletes), d.cfg.MaxDeletes)
d.summary.NumDeletes = 0
} else {
// Apply deletes in parallel.
sort.Slice(deletes, func(i, j int) bool { return deletes[i] < deletes[j] })
sem := make(chan struct{}, nParallel)
for _, del := range deletes {
if d.dryRun {
if d.cfg.DryRun {
if !d.quiet {
jww.FEEDBACK.Printf("[DRY RUN] Would delete %s\n", del)
}
@@ -264,6 +247,7 @@ func (d *Deployer) Deploy(ctx context.Context) error {
sem <- struct{}{}
}
}
if len(errs) > 0 {
if !d.quiet {
jww.FEEDBACK.Printf("Encountered %d errors.\n", len(errs))
@@ -274,9 +258,9 @@ func (d *Deployer) Deploy(ctx context.Context) error {
jww.FEEDBACK.Println("Success!")
}
if d.invalidateCDN {
if d.cfg.InvalidateCDN {
if d.target.CloudFrontDistributionID != "" {
if d.dryRun {
if d.cfg.DryRun {
if !d.quiet {
jww.FEEDBACK.Printf("[DRY RUN] Would invalidate CloudFront CDN with ID %s\n", d.target.CloudFrontDistributionID)
}
@@ -289,7 +273,7 @@ func (d *Deployer) Deploy(ctx context.Context) error {
}
}
if d.target.GoogleCloudCDNOrigin != "" {
if d.dryRun {
if d.cfg.DryRun {
if !d.quiet {
jww.FEEDBACK.Printf("[DRY RUN] Would invalidate Google Cloud CDN with origin %s\n", d.target.GoogleCloudCDNOrigin)
}
@@ -356,14 +340,14 @@ type localFile struct {
UploadSize int64
fs afero.Fs
matcher *matcher
matcher *Matcher
md5 []byte // cache
gzipped bytes.Buffer // cached of gzipped contents if gzipping
mediaTypes media.Types
}
// newLocalFile initializes a *localFile.
func newLocalFile(fs afero.Fs, nativePath, slashpath string, m *matcher, mt media.Types) (*localFile, error) {
func newLocalFile(fs afero.Fs, nativePath, slashpath string, m *Matcher, mt media.Types) (*localFile, error) {
f, err := fs.Open(nativePath)
if err != nil {
return nil, err
@@ -448,7 +432,7 @@ func (lf *localFile) ContentType() string {
ext := filepath.Ext(lf.NativePath)
if mimeType, _, found := lf.mediaTypes.GetFirstBySuffix(strings.TrimPrefix(ext, ".")); found {
return mimeType.Type()
return mimeType.Type
}
return mime.TypeByExtension(ext)
@@ -495,7 +479,7 @@ func knownHiddenDirectory(name string) bool {
// walkLocal walks the source directory and returns a flat list of files,
// using localFile.SlashPath as the map keys.
func walkLocal(fs afero.Fs, matchers []*matcher, include, exclude glob.Glob, mediaTypes media.Types) (map[string]*localFile, error) {
func walkLocal(fs afero.Fs, matchers []*Matcher, include, exclude glob.Glob, mediaTypes media.Types) (map[string]*localFile, error) {
retval := map[string]*localFile{}
err := afero.Walk(fs, "", func(path string, info os.FileInfo, err error) error {
if err != nil {
@@ -534,7 +518,7 @@ func walkLocal(fs afero.Fs, matchers []*matcher, include, exclude glob.Glob, med
}
// Find the first matching matcher (if any).
var m *matcher
var m *Matcher
for _, cur := range matchers {
if cur.Matches(slashpath) {
m = cur


@@ -25,23 +25,37 @@ import (
"github.com/gobwas/glob"
"github.com/gohugoio/hugo/config"
hglob "github.com/gohugoio/hugo/hugofs/glob"
"github.com/gohugoio/hugo/media"
"github.com/mitchellh/mapstructure"
)
const deploymentConfigKey = "deployment"
// deployConfig is the complete configuration for deployment.
type deployConfig struct {
Targets []*target
Matchers []*matcher
// DeployConfig is the complete configuration for deployment.
type DeployConfig struct {
Targets []*Target
Matchers []*Matcher
Order []string
ordering []*regexp.Regexp // compiled Order
mediaTypes media.Types
// Usually set via flags.
// Target deployment Name; defaults to the first one.
Target string
// Show a confirm prompt before deploying.
Confirm bool
// DryRun will try the deployment without any remote changes.
DryRun bool
// Force will re-upload all files.
Force bool
// Invalidate the CDN cache listed in the deployment target.
InvalidateCDN bool
// MaxDeletes is the maximum number of files to delete.
MaxDeletes int
// Number of concurrent workers to use when uploading files.
Workers int
ordering []*regexp.Regexp // compiled Order
}
type target struct {
type Target struct {
Name string
URL string
@@ -61,7 +75,7 @@ type target struct {
excludeGlob glob.Glob
}
func (tgt *target) parseIncludeExclude() error {
func (tgt *Target) parseIncludeExclude() error {
var err error
if tgt.Include != "" {
tgt.includeGlob, err = hglob.GetGlob(tgt.Include)
@@ -78,9 +92,9 @@ func (tgt *target) parseIncludeExclude() error {
return nil
}
// matcher represents configuration to be applied to files whose paths match
// Matcher represents configuration to be applied to files whose paths match
// a specified pattern.
type matcher struct {
type Matcher struct {
// Pattern is the string pattern to match against paths.
// Matching is done against paths converted to use / as the path separator.
Pattern string
@@ -109,15 +123,14 @@ type matcher struct {
re *regexp.Regexp
}
func (m *matcher) Matches(path string) bool {
func (m *Matcher) Matches(path string) bool {
return m.re.MatchString(path)
}
// decode creates a config from a given Hugo configuration.
func decodeConfig(cfg config.Provider) (deployConfig, error) {
// DecodeConfig creates a config from a given Hugo configuration.
func DecodeConfig(cfg config.Provider) (DeployConfig, error) {
var (
mediaTypesConfig []map[string]any
dcfg deployConfig
dcfg DeployConfig
)
if !cfg.IsSet(deploymentConfigKey) {
@@ -126,8 +139,13 @@ func decodeConfig(cfg config.Provider) (deployConfig, error) {
if err := mapstructure.WeakDecode(cfg.GetStringMap(deploymentConfigKey), &dcfg); err != nil {
return dcfg, err
}
if dcfg.Workers <= 0 {
dcfg.Workers = 10
}
for _, tgt := range dcfg.Targets {
if *tgt == (target{}) {
if *tgt == (Target{}) {
return dcfg, errors.New("empty deployment target")
}
if err := tgt.parseIncludeExclude(); err != nil {
@@ -136,7 +154,7 @@ func decodeConfig(cfg config.Provider) (deployConfig, error) {
}
var err error
for _, m := range dcfg.Matchers {
if *m == (matcher{}) {
if *m == (Matcher{}) {
return dcfg, errors.New("empty deployment matcher")
}
m.re, err = regexp.Compile(m.Pattern)
@@ -152,13 +170,5 @@ func decodeConfig(cfg config.Provider) (deployConfig, error) {
dcfg.ordering = append(dcfg.ordering, re)
}
if cfg.IsSet("mediaTypes") {
mediaTypesConfig = append(mediaTypesConfig, cfg.GetStringMap("mediaTypes"))
}
dcfg.mediaTypes, err = media.DecodeTypes(mediaTypesConfig...)
if err != nil {
return dcfg, err
}
return dcfg, nil
}


@@ -84,7 +84,7 @@ force = true
cfg, err := config.FromConfigString(tomlConfig, "toml")
c.Assert(err, qt.IsNil)
dcfg, err := decodeConfig(cfg)
dcfg, err := DecodeConfig(cfg)
c.Assert(err, qt.IsNil)
// Order.
@@ -139,7 +139,7 @@ order = ["["] # invalid regular expression
cfg, err := config.FromConfigString(tomlConfig, "toml")
c.Assert(err, qt.IsNil)
_, err = decodeConfig(cfg)
_, err = DecodeConfig(cfg)
c.Assert(err, qt.Not(qt.IsNil))
}
@@ -157,14 +157,14 @@ Pattern = "[" # invalid regular expression
cfg, err := config.FromConfigString(tomlConfig, "toml")
c.Assert(err, qt.IsNil)
_, err = decodeConfig(cfg)
_, err = DecodeConfig(cfg)
c.Assert(err, qt.Not(qt.IsNil))
}
func TestDecodeConfigDefault(t *testing.T) {
c := qt.New(t)
dcfg, err := decodeConfig(config.New())
dcfg, err := DecodeConfig(config.New())
c.Assert(err, qt.IsNil)
c.Assert(len(dcfg.Targets), qt.Equals, 0)
c.Assert(len(dcfg.Matchers), qt.Equals, 0)
@@ -180,7 +180,7 @@ func TestEmptyTarget(t *testing.T) {
cfg, err := config.FromConfigString(tomlConfig, "toml")
c.Assert(err, qt.IsNil)
_, err = decodeConfig(cfg)
_, err = DecodeConfig(cfg)
c.Assert(err, qt.Not(qt.IsNil))
}
@@ -194,6 +194,6 @@ func TestEmptyMatcher(t *testing.T) {
cfg, err := config.FromConfigString(tomlConfig, "toml")
c.Assert(err, qt.IsNil)
_, err = decodeConfig(cfg)
_, err = DecodeConfig(cfg)
c.Assert(err, qt.Not(qt.IsNil))
}


@@ -108,7 +108,7 @@ func TestFindDiffs(t *testing.T) {
{
Description: "local == remote with route.Force true -> diffs",
Local: []*localFile{
{NativePath: "aaa", SlashPath: "aaa", UploadSize: 1, matcher: &matcher{Force: true}, md5: hash1},
{NativePath: "aaa", SlashPath: "aaa", UploadSize: 1, matcher: &Matcher{Force: true}, md5: hash1},
makeLocal("bbb", 2, hash1),
},
Remote: []*blob.ListObject{
@@ -289,8 +289,8 @@ func TestLocalFile(t *testing.T) {
tests := []struct {
Description string
Path string
Matcher *matcher
MediaTypesConfig []map[string]any
Matcher *Matcher
MediaTypesConfig map[string]any
WantContent []byte
WantSize int64
WantMD5 []byte
@@ -315,7 +315,7 @@ func TestLocalFile(t *testing.T) {
{
Description: "CacheControl from matcher",
Path: "foo.txt",
Matcher: &matcher{CacheControl: "max-age=630720000"},
Matcher: &Matcher{CacheControl: "max-age=630720000"},
WantContent: contentBytes,
WantSize: contentLen,
WantMD5: contentMD5[:],
@@ -324,7 +324,7 @@ func TestLocalFile(t *testing.T) {
{
Description: "ContentEncoding from matcher",
Path: "foo.txt",
Matcher: &matcher{ContentEncoding: "foobar"},
Matcher: &Matcher{ContentEncoding: "foobar"},
WantContent: contentBytes,
WantSize: contentLen,
WantMD5: contentMD5[:],
@@ -333,7 +333,7 @@ func TestLocalFile(t *testing.T) {
{
Description: "ContentType from matcher",
Path: "foo.txt",
Matcher: &matcher{ContentType: "foo/bar"},
Matcher: &Matcher{ContentType: "foo/bar"},
WantContent: contentBytes,
WantSize: contentLen,
WantMD5: contentMD5[:],
@@ -342,7 +342,7 @@ func TestLocalFile(t *testing.T) {
{
Description: "gzipped content",
Path: "foo.txt",
Matcher: &matcher{Gzip: true},
Matcher: &Matcher{Gzip: true},
WantContent: gzBytes,
WantSize: gzLen,
WantMD5: gzMD5[:],
@@ -351,11 +351,9 @@ func TestLocalFile(t *testing.T) {
{
Description: "Custom MediaType",
Path: "foo.hugo",
MediaTypesConfig: []map[string]any{
{
"hugo/custom": map[string]any{
"suffixes": []string{"hugo"},
},
MediaTypesConfig: map[string]any{
"hugo/custom": map[string]any{
"suffixes": []string{"hugo"},
},
},
WantContent: contentBytes,
@@ -373,11 +371,11 @@ func TestLocalFile(t *testing.T) {
}
mediaTypes := media.DefaultTypes
if len(tc.MediaTypesConfig) > 0 {
mt, err := media.DecodeTypes(tc.MediaTypesConfig...)
mt, err := media.DecodeTypes(tc.MediaTypesConfig)
if err != nil {
t.Fatal(err)
}
mediaTypes = mt
mediaTypes = mt.Config
}
lf, err := newLocalFile(fs, tc.Path, filepath.ToSlash(tc.Path), tc.Matcher, mediaTypes)
if err != nil {
@@ -556,9 +554,9 @@ func TestEndToEndSync(t *testing.T) {
}
deployer := &Deployer{
localFs: test.fs,
maxDeletes: -1,
bucket: test.bucket,
mediaTypes: media.DefaultTypes,
cfg: DeployConfig{MaxDeletes: -1},
}
// Initial deployment should sync remote with local.
@@ -639,9 +637,9 @@ func TestMaxDeletes(t *testing.T) {
}
deployer := &Deployer{
localFs: test.fs,
maxDeletes: -1,
bucket: test.bucket,
mediaTypes: media.DefaultTypes,
cfg: DeployConfig{MaxDeletes: -1},
}
// Sync remote with local.
@@ -662,7 +660,7 @@ func TestMaxDeletes(t *testing.T) {
}
// A deployment with maxDeletes=0 shouldn't change anything.
deployer.maxDeletes = 0
deployer.cfg.MaxDeletes = 0
if err := deployer.Deploy(ctx); err != nil {
t.Errorf("deploy failed: %v", err)
}
@@ -672,7 +670,7 @@ func TestMaxDeletes(t *testing.T) {
}
// A deployment with maxDeletes=1 shouldn't change anything either.
deployer.maxDeletes = 1
deployer.cfg.MaxDeletes = 1
if err := deployer.Deploy(ctx); err != nil {
t.Errorf("deploy failed: %v", err)
}
@@ -682,7 +680,7 @@ func TestMaxDeletes(t *testing.T) {
}
// A deployment with maxDeletes=2 should make the changes.
deployer.maxDeletes = 2
deployer.cfg.MaxDeletes = 2
if err := deployer.Deploy(ctx); err != nil {
t.Errorf("deploy failed: %v", err)
}
@@ -700,7 +698,7 @@ func TestMaxDeletes(t *testing.T) {
}
// A deployment with maxDeletes=-1 should make the changes.
deployer.maxDeletes = -1
deployer.cfg.MaxDeletes = -1
if err := deployer.Deploy(ctx); err != nil {
t.Errorf("deploy failed: %v", err)
}
@@ -762,7 +760,7 @@ func TestIncludeExclude(t *testing.T) {
if err != nil {
t.Fatal(err)
}
tgt := &target{
tgt := &Target{
Include: test.Include,
Exclude: test.Exclude,
}
@@ -770,9 +768,8 @@ func TestIncludeExclude(t *testing.T) {
t.Error(err)
}
deployer := &Deployer{
localFs: fsTest.fs,
maxDeletes: -1,
bucket: fsTest.bucket,
localFs: fsTest.fs,
cfg: DeployConfig{MaxDeletes: -1}, bucket: fsTest.bucket,
target: tgt,
mediaTypes: media.DefaultTypes,
}
@@ -828,9 +825,8 @@ func TestIncludeExcludeRemoteDelete(t *testing.T) {
t.Fatal(err)
}
deployer := &Deployer{
localFs: fsTest.fs,
maxDeletes: -1,
bucket: fsTest.bucket,
localFs: fsTest.fs,
cfg: DeployConfig{MaxDeletes: -1}, bucket: fsTest.bucket,
mediaTypes: media.DefaultTypes,
}
@@ -848,7 +844,7 @@ func TestIncludeExcludeRemoteDelete(t *testing.T) {
}
// Second sync
tgt := &target{
tgt := &Target{
Include: test.Include,
Exclude: test.Exclude,
}
@@ -882,7 +878,7 @@ func TestCompression(t *testing.T) {
deployer := &Deployer{
localFs: test.fs,
bucket: test.bucket,
matchers: []*matcher{{Pattern: ".*", Gzip: true, re: regexp.MustCompile(".*")}},
cfg: DeployConfig{MaxDeletes: -1, Matchers: []*Matcher{{Pattern: ".*", Gzip: true, re: regexp.MustCompile(".*")}}},
mediaTypes: media.DefaultTypes,
}
@@ -937,7 +933,7 @@ func TestMatching(t *testing.T) {
deployer := &Deployer{
localFs: test.fs,
bucket: test.bucket,
matchers: []*matcher{{Pattern: "^subdir/aaa$", Force: true, re: regexp.MustCompile("^subdir/aaa$")}},
cfg: DeployConfig{MaxDeletes: -1, Matchers: []*Matcher{{Pattern: "^subdir/aaa$", Force: true, re: regexp.MustCompile("^subdir/aaa$")}}},
mediaTypes: media.DefaultTypes,
}
@@ -962,7 +958,7 @@ func TestMatching(t *testing.T) {
}
// Repeat with a matcher that should now match 3 files.
deployer.matchers = []*matcher{{Pattern: "aaa", Force: true, re: regexp.MustCompile("aaa")}}
deployer.cfg.Matchers = []*Matcher{{Pattern: "aaa", Force: true, re: regexp.MustCompile("aaa")}}
if err := deployer.Deploy(ctx); err != nil {
t.Errorf("no-op deploy with triple force matcher: %v", err)
}