deploy.go
// Copyright 2019 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !nodeploy
// +build !nodeploy

package deploy

import (
	"bytes"
	"compress/gzip"
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"mime"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
	"strings"
	"sync"

	"github.com/dustin/go-humanize"
	"github.com/gobwas/glob"
	"github.com/gohugoio/hugo/config"
	"github.com/gohugoio/hugo/media"
	"github.com/spf13/afero"
	jww "github.com/spf13/jwalterweatherman"
	"golang.org/x/text/unicode/norm"

	"gocloud.dev/blob"
	_ "gocloud.dev/blob/fileblob" // import
	_ "gocloud.dev/blob/gcsblob"  // import
	_ "gocloud.dev/blob/s3blob"   // import
	"gocloud.dev/gcerrors"
)

// Deployer supports deploying the site to target cloud providers.
type Deployer struct {
	localFs afero.Fs
	bucket  *blob.Bucket

	target        *target          // the target to deploy to
	matchers      []*matcher       // matchers to apply to uploaded files
	mediaTypes    media.Types      // Hugo's MediaType to guess ContentType
	ordering      []*regexp.Regexp // orders uploads
	quiet         bool             // true reduces STDOUT
	confirm       bool             // true enables confirmation before making changes
	dryRun        bool             // true skips confirmations and prints changes instead of applying them
	force         bool             // true forces upload of all files
	invalidateCDN bool             // true enables CDN cache invalidation (if possible)
	maxDeletes    int              // caps the # of files to delete; -1 to disable

	// For tests...
	summary deploySummary // summary of latest Deploy results
}

type deploySummary struct {
	NumLocal, NumRemote, NumUploads, NumDeletes int
}

const metaMD5Hash = "md5chksum" // the meta key to store md5hash in

// New constructs a new *Deployer.
func New(cfg config.Provider, localFs afero.Fs) (*Deployer, error) {
	targetName := cfg.GetString("target")

	// Load the [deployment] section of the config.
	dcfg, err := decodeConfig(cfg)
	if err != nil {
		return nil, err
	}

	if len(dcfg.Targets) == 0 {
		return nil, errors.New("no deployment targets found")
	}

	// Find the target to deploy to.
	var tgt *target
	if targetName == "" {
		// Default to the first target.
		tgt = dcfg.Targets[0]
	} else {
		for _, t := range dcfg.Targets {
			if t.Name == targetName {
				tgt = t
			}
		}
		if tgt == nil {
			return nil, fmt.Errorf("deployment target %q not found", targetName)
		}
	}

	return &Deployer{
		localFs:       localFs,
		target:        tgt,
		matchers:      dcfg.Matchers,
		ordering:      dcfg.ordering,
		mediaTypes:    dcfg.mediaTypes,
		quiet:         cfg.GetBool("quiet"),
		confirm:       cfg.GetBool("confirm"),
		dryRun:        cfg.GetBool("dryRun"),
		force:         cfg.GetBool("force"),
		invalidateCDN: cfg.GetBool("invalidateCDN"),
		maxDeletes:    cfg.GetInt("maxDeletes"),
	}, nil
}
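
// An illustrative [deployment] configuration of the kind decodeConfig above
// is expected to parse. This is a sketch following Hugo's documented
// deployment settings; the target name and values here are hypothetical:
//
//	[deployment]
//	order = [".jpg$", ".gif$"]
//
//	[[deployment.targets]]
//	name = "production"
//	URL = "s3://example-bucket?region=us-west-1"
//	cloudFrontDistributionID = "EXAMPLEDISTID"
//
//	[[deployment.matchers]]
//	pattern = "^.+\\.(js|css|svg|ttf)$"
//	cacheControl = "max-age=31536000, no-transform, public"
//	gzip = true
//
// A target would then be deployed with, e.g.:
//
//	hugo deploy --target=production --dryRun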

func (d *Deployer) openBucket(ctx context.Context) (*blob.Bucket, error) {
	if d.bucket != nil {
		return d.bucket, nil
	}
	jww.FEEDBACK.Printf("Deploying to target %q (%s)\n", d.target.Name, d.target.URL)
	return blob.OpenBucket(ctx, d.target.URL)
}

// Deploy deploys the site to a target.
func (d *Deployer) Deploy(ctx context.Context) error {
	bucket, err := d.openBucket(ctx)
	if err != nil {
		return err
	}

	// Load local files from the source directory.
	var include, exclude glob.Glob
	if d.target != nil {
		include, exclude = d.target.includeGlob, d.target.excludeGlob
	}
	local, err := walkLocal(d.localFs, d.matchers, include, exclude, d.mediaTypes)
	if err != nil {
		return err
	}
	jww.INFO.Printf("Found %d local files.\n", len(local))
	d.summary.NumLocal = len(local)

	// Load remote files from the target.
	remote, err := walkRemote(ctx, bucket, include, exclude)
	if err != nil {
		return err
	}
	jww.INFO.Printf("Found %d remote files.\n", len(remote))
	d.summary.NumRemote = len(remote)

	// Diff local vs remote to see what changes need to be applied.
	uploads, deletes := findDiffs(local, remote, d.force)
	d.summary.NumUploads = len(uploads)
	d.summary.NumDeletes = len(deletes)
	if len(uploads)+len(deletes) == 0 {
		if !d.quiet {
			jww.FEEDBACK.Println("No changes required.")
		}
		return nil
	}
	if !d.quiet {
		jww.FEEDBACK.Println(summarizeChanges(uploads, deletes))
	}

	// Ask for confirmation before proceeding.
	if d.confirm && !d.dryRun {
		fmt.Printf("Continue? (Y/n) ")
		var confirm string
		if _, err := fmt.Scanln(&confirm); err != nil {
			return err
		}
		if confirm != "" && confirm[0] != 'y' && confirm[0] != 'Y' {
			return errors.New("aborted")
		}
	}

	// Order the uploads. They are organized in groups; all uploads in a group
	// must be complete before moving on to the next group.
	uploadGroups := applyOrdering(d.ordering, uploads)

	// Apply the changes in parallel, using an inverted worker
	// pool (https://www.youtube.com/watch?v=5zXAHh5tJqQ&t=26m58s);
	// see the sketch after this function. sem prevents more than
	// nParallel concurrent goroutines.
	const nParallel = 10
	var errs []error
	var errMu sync.Mutex // protects errs

	for _, uploads := range uploadGroups {
		// Short-circuit for an empty group.
		if len(uploads) == 0 {
			continue
		}

		// Within the group, apply uploads in parallel.
		sem := make(chan struct{}, nParallel)
		for _, upload := range uploads {
			if d.dryRun {
				if !d.quiet {
					jww.FEEDBACK.Printf("[DRY RUN] Would upload: %v\n", upload)
				}
				continue
			}

			sem <- struct{}{}
			go func(upload *fileToUpload) {
				if err := doSingleUpload(ctx, bucket, upload); err != nil {
					errMu.Lock()
					defer errMu.Unlock()
					errs = append(errs, err)
				}
				<-sem
			}(upload)
		}
		// Wait for all uploads in the group to finish.
		for n := nParallel; n > 0; n-- {
			sem <- struct{}{}
		}
	}

	if d.maxDeletes != -1 && len(deletes) > d.maxDeletes {
		jww.WARN.Printf("Skipping %d deletes because it is more than --maxDeletes (%d). If this is expected, set --maxDeletes to a larger number, or -1 to disable this check.\n", len(deletes), d.maxDeletes)
		d.summary.NumDeletes = 0
	} else {
		// Apply deletes in parallel.
		sort.Slice(deletes, func(i, j int) bool { return deletes[i] < deletes[j] })
		sem := make(chan struct{}, nParallel)
		for _, del := range deletes {
			if d.dryRun {
				if !d.quiet {
					jww.FEEDBACK.Printf("[DRY RUN] Would delete %s\n", del)
				}
				continue
			}
			sem <- struct{}{}
			go func(del string) {
				jww.INFO.Printf("Deleting %s...\n", del)
				if err := bucket.Delete(ctx, del); err != nil {
					if gcerrors.Code(err) == gcerrors.NotFound {
						jww.WARN.Printf("Failed to delete %q because it wasn't found: %v", del, err)
					} else {
						errMu.Lock()
						defer errMu.Unlock()
						errs = append(errs, err)
					}
				}
				<-sem
			}(del)
		}
		// Wait for all deletes to finish.
		for n := nParallel; n > 0; n-- {
			sem <- struct{}{}
		}
	}
	if len(errs) > 0 {
		if !d.quiet {
			jww.FEEDBACK.Printf("Encountered %d errors.\n", len(errs))
		}
		return errs[0]
	}
	if !d.quiet {
		jww.FEEDBACK.Println("Success!")
	}

	if d.invalidateCDN {
		if d.target.CloudFrontDistributionID != "" {
			if d.dryRun {
				if !d.quiet {
					jww.FEEDBACK.Printf("[DRY RUN] Would invalidate CloudFront CDN with ID %s\n", d.target.CloudFrontDistributionID)
				}
			} else {
				jww.FEEDBACK.Println("Invalidating CloudFront CDN...")
				if err := InvalidateCloudFront(ctx, d.target.CloudFrontDistributionID); err != nil {
					jww.FEEDBACK.Printf("Failed to invalidate CloudFront CDN: %v\n", err)
					return err
				}
			}
		}
		if d.target.GoogleCloudCDNOrigin != "" {
			if d.dryRun {
				if !d.quiet {
					jww.FEEDBACK.Printf("[DRY RUN] Would invalidate Google Cloud CDN with origin %s\n", d.target.GoogleCloudCDNOrigin)
				}
			} else {
				jww.FEEDBACK.Println("Invalidating Google Cloud CDN...")
				if err := InvalidateGoogleCloudCDN(ctx, d.target.GoogleCloudCDNOrigin); err != nil {
					jww.FEEDBACK.Printf("Failed to invalidate Google Cloud CDN: %v\n", err)
					return err
				}
			}
		}
		jww.FEEDBACK.Println("Success!")
	}
	return nil
}
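
// A minimal sketch (not part of the original file) of the inverted worker
// pool used in Deploy above. A buffered channel acts as a counting semaphore;
// reacquiring every slot at the end blocks until all goroutines are done.
// "jobs" and "process" are hypothetical stand-ins:
//
//	sem := make(chan struct{}, nParallel)
//	for _, job := range jobs {
//		sem <- struct{}{} // acquire a slot; blocks while nParallel are running
//		go func(j string) {
//			defer func() { <-sem }() // release the slot
//			process(j)
//		}(job)
//	}
//	for n := nParallel; n > 0; n-- {
//		sem <- struct{}{} // drains only after every goroutine has released
//	}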

// summarizeChanges creates a text description of the proposed changes.
func summarizeChanges(uploads []*fileToUpload, deletes []string) string {
	uploadSize := int64(0)
	for _, u := range uploads {
		uploadSize += u.Local.UploadSize
	}
	return fmt.Sprintf("Identified %d file(s) to upload, totaling %s, and %d file(s) to delete.", len(uploads), humanize.Bytes(uint64(uploadSize)), len(deletes))
}

// doSingleUpload executes a single file upload.
func doSingleUpload(ctx context.Context, bucket *blob.Bucket, upload *fileToUpload) error {
	jww.INFO.Printf("Uploading %v...\n", upload)
	opts := &blob.WriterOptions{
		CacheControl:    upload.Local.CacheControl(),
		ContentEncoding: upload.Local.ContentEncoding(),
		ContentType:     upload.Local.ContentType(),
		Metadata:        map[string]string{metaMD5Hash: hex.EncodeToString(upload.Local.MD5())},
	}
	w, err := bucket.NewWriter(ctx, upload.Local.SlashPath, opts)
	if err != nil {
		return err
	}
	r, err := upload.Local.Reader()
	if err != nil {
		return err
	}
	defer r.Close()
	_, err = io.Copy(w, r)
	if err != nil {
		return err
	}
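	// Close must be checked: gocloud.dev may buffer writes, and the blob is
	// not guaranteed to be committed to the bucket until Close succeeds.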
	if err := w.Close(); err != nil {
		return err
	}
	return nil
}

// localFile represents a local file from the source. Use newLocalFile to
// construct one.
type localFile struct {
	// NativePath is the native path to the file (using filepath.Separator).
	NativePath string
	// SlashPath is NativePath converted to use /.
	SlashPath string
	// UploadSize is the size of the content to be uploaded. It may not
	// be the same as the local file size if the content will be
	// gzipped before upload.
	UploadSize int64

	fs         afero.Fs
	matcher    *matcher
	md5        []byte       // cache
	gzipped    bytes.Buffer // cache of gzipped contents if gzipping
	mediaTypes media.Types
}

// newLocalFile initializes a *localFile.
func newLocalFile(fs afero.Fs, nativePath, slashpath string, m *matcher, mt media.Types) (*localFile, error) {
	f, err := fs.Open(nativePath)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	lf := &localFile{
		NativePath: nativePath,
		SlashPath:  slashpath,
		fs:         fs,
		matcher:    m,
		mediaTypes: mt,
	}
	if m != nil && m.Gzip {
		// We're going to gzip the content. Do it once now, and cache the result
		// in gzipped. The UploadSize is the size of the gzipped content.
		gz := gzip.NewWriter(&lf.gzipped)
		if _, err := io.Copy(gz, f); err != nil {
			return nil, err
		}
		if err := gz.Close(); err != nil {
			return nil, err
		}
		lf.UploadSize = int64(lf.gzipped.Len())
	} else {
		// Raw content. Just get the UploadSize.
		info, err := f.Stat()
		if err != nil {
			return nil, err
		}
		lf.UploadSize = info.Size()
	}
	return lf, nil
}

// Reader returns an io.ReadCloser for reading the content to be uploaded.
// The caller must call Close on the returned ReadCloser.
// The reader content may not be the same as the local file content due to
// gzipping.
func (lf *localFile) Reader() (io.ReadCloser, error) {
	if lf.matcher != nil && lf.matcher.Gzip {
		// We've got the gzipped contents cached in gzipped.
		// Note: we can't use lf.gzipped directly as a Reader, since it discards
		// data as it is read, and we may read it more than once.
		return ioutil.NopCloser(bytes.NewReader(lf.gzipped.Bytes())), nil
	}
	// Not expected to fail since we opened the file successfully in
	// newLocalFile, but it could happen due to changes in the underlying
	// filesystem.
	return lf.fs.Open(lf.NativePath)
}

// CacheControl returns the Cache-Control header to use for lf, based on the
// first matching matcher (if any).
func (lf *localFile) CacheControl() string {
	if lf.matcher == nil {
		return ""
	}
	return lf.matcher.CacheControl
}

// ContentEncoding returns the Content-Encoding header to use for lf, based
// on the matcher's Content-Encoding and Gzip fields.
func (lf *localFile) ContentEncoding() string {
	if lf.matcher == nil {
		return ""
	}
	if lf.matcher.Gzip {
		return "gzip"
	}
	return lf.matcher.ContentEncoding
}

// ContentType returns the Content-Type header to use for lf.
// It first checks if there's a Content-Type header configured via a matching
// matcher; if not, it tries to generate one based on the filename extension.
// If this fails, the Content-Type will be the empty string. In this case, Go
// Cloud will automatically try to infer a Content-Type based on the file
// content.
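//
// For example (illustrative, not from the original): a matcher that sets
// contentType = "text/html; charset=utf-8" wins outright; otherwise
// "logo.svg" resolves through Hugo's media types to "image/svg+xml"; failing
// that, mime.TypeByExtension(".svg") is consulted.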
func (lf *localFile) ContentType() string {
	if lf.matcher != nil && lf.matcher.ContentType != "" {
		return lf.matcher.ContentType
	}

	ext := filepath.Ext(lf.NativePath)
	if mimeType, _, found := lf.mediaTypes.GetFirstBySuffix(strings.TrimPrefix(ext, ".")); found {
		return mimeType.Type()
	}

	return mime.TypeByExtension(ext)
}

// Force returns true if the file should be forced to re-upload based on the
// matching matcher.
func (lf *localFile) Force() bool {
	return lf.matcher != nil && lf.matcher.Force
}

// MD5 returns an MD5 hash of the content to be uploaded.
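// On error it returns nil; findDiffs then sees the hashes as differing and
// re-uploads the file.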
func (lf *localFile) MD5() []byte {
	if len(lf.md5) > 0 {
		return lf.md5
	}
	h := md5.New()
	r, err := lf.Reader()
	if err != nil {
		return nil
	}
	defer r.Close()
	if _, err := io.Copy(h, r); err != nil {
		return nil
	}
	lf.md5 = h.Sum(nil)
	return lf.md5
}

// knownHiddenDirectory checks if the specified name is a well known
// hidden directory.
func knownHiddenDirectory(name string) bool {
	knownDirectories := []string{
		".well-known",
	}

	for _, dir := range knownDirectories {
		if name == dir {
			return true
		}
	}
	return false
}

// walkLocal walks the source directory and returns a flat list of files,
// using localFile.SlashPath as the map keys.
func walkLocal(fs afero.Fs, matchers []*matcher, include, exclude glob.Glob, mediaTypes media.Types) (map[string]*localFile, error) {
	retval := map[string]*localFile{}
	err := afero.Walk(fs, "", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			// Skip hidden directories.
			if path != "" && strings.HasPrefix(info.Name(), ".") {
				// Except for specific hidden directories
				if !knownHiddenDirectory(info.Name()) {
					return filepath.SkipDir
				}
			}
			return nil
		}

		// .DS_Store is an internal macOS attribute file; skip it.
		if info.Name() == ".DS_Store" {
			return nil
		}

		// On HFS+ file systems, file paths are stored in NFD form;
		// normalize them to NFC.
		if runtime.GOOS == "darwin" {
			path = norm.NFC.String(path)
		}

		// Check include/exclude matchers.
		slashpath := filepath.ToSlash(path)
		if include != nil && !include.Match(slashpath) {
			jww.INFO.Printf("  dropping %q due to include\n", slashpath)
			return nil
		}
		if exclude != nil && exclude.Match(slashpath) {
			jww.INFO.Printf("  dropping %q due to exclude\n", slashpath)
			return nil
		}

		// Find the first matching matcher (if any).
		var m *matcher
		for _, cur := range matchers {
			if cur.Matches(slashpath) {
				m = cur
				break
			}
		}
		lf, err := newLocalFile(fs, path, slashpath, m, mediaTypes)
		if err != nil {
			return err
		}
		retval[lf.SlashPath] = lf
		return nil
	})
	if err != nil {
		return nil, err
	}
	return retval, nil
}

// walkRemote walks the target bucket and returns a flat list.
func walkRemote(ctx context.Context, bucket *blob.Bucket, include, exclude glob.Glob) (map[string]*blob.ListObject, error) {
	retval := map[string]*blob.ListObject{}
	iter := bucket.List(nil)
	for {
		obj, err := iter.Next(ctx)
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		// Check include/exclude matchers.
		if include != nil && !include.Match(obj.Key) {
			jww.INFO.Printf("  remote dropping %q due to include\n", obj.Key)
			continue
		}
		if exclude != nil && exclude.Match(obj.Key) {
			jww.INFO.Printf("  remote dropping %q due to exclude\n", obj.Key)
			continue
		}
		// If the remote didn't return an MD5 from List, try the MD5 we stored
		// in the blob's metadata at upload time; if that's missing too,
		// compute one by reading the blob. A missing MD5 can happen for some
		// providers (e.g., fileblob, which uses the local filesystem), but
		// not for the most common Cloud providers (S3, GCS, Azure). It can
		// also happen for S3 if the blob was uploaded via a multipart upload.
		// Although it's unfortunate to have to read the file, it's likely
		// better than assuming a difference and re-uploading it.
		if len(obj.MD5) == 0 {
			var attrMD5 []byte
			attrs, err := bucket.Attributes(ctx, obj.Key)
			if err == nil {
				md5String, exists := attrs.Metadata[metaMD5Hash]
				if exists {
					attrMD5, _ = hex.DecodeString(md5String)
				}
			}
			if len(attrMD5) == 0 {
				r, err := bucket.NewReader(ctx, obj.Key, nil)
				if err == nil {
					h := md5.New()
					if _, err := io.Copy(h, r); err == nil {
						obj.MD5 = h.Sum(nil)
					}
					r.Close()
				}
			} else {
				obj.MD5 = attrMD5
			}
		}
		retval[obj.Key] = obj
	}
	return retval, nil
}

// uploadReason is an enum of reasons why a file must be uploaded.
type uploadReason string

const (
	reasonUnknown    uploadReason = "unknown"
	reasonNotFound   uploadReason = "not found at target"
	reasonForce      uploadReason = "--force"
	reasonSize       uploadReason = "size differs"
	reasonMD5Differs uploadReason = "md5 differs"
	reasonMD5Missing uploadReason = "remote md5 missing"
)

// fileToUpload represents a single local file that should be uploaded to
// the target.
type fileToUpload struct {
	Local  *localFile
	Reason uploadReason
}

func (u *fileToUpload) String() string {
	details := []string{humanize.Bytes(uint64(u.Local.UploadSize))}
	if s := u.Local.CacheControl(); s != "" {
		details = append(details, fmt.Sprintf("Cache-Control: %q", s))
	}
	if s := u.Local.ContentEncoding(); s != "" {
		details = append(details, fmt.Sprintf("Content-Encoding: %q", s))
	}
	if s := u.Local.ContentType(); s != "" {
		details = append(details, fmt.Sprintf("Content-Type: %q", s))
	}
	return fmt.Sprintf("%s (%s): %v", u.Local.SlashPath, strings.Join(details, ", "), u.Reason)
}

// findDiffs diffs localFiles vs remoteFiles to see what changes should be
// applied to the remote target. It returns a slice of *fileToUpload and a
// slice of paths for files to delete.
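//
// A worked example (hypothetical paths): with local = {a.html (changed),
// b.css (unchanged)} and remote = {b.css, stale.html}, it returns
// uploads = [a.html (md5 differs)] and deletes = [stale.html].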
func findDiffs(localFiles map[string]*localFile, remoteFiles map[string]*blob.ListObject, force bool) ([]*fileToUpload, []string) {
	var uploads []*fileToUpload
	var deletes []string

	found := map[string]bool{}
	for path, lf := range localFiles {
		upload := false
		reason := reasonUnknown

		if remoteFile, ok := remoteFiles[path]; ok {
			// The file exists in remote. Let's see if we need to upload it anyway.

			// TODO: We don't register a diff if the metadata (e.g., Content-Type
			// header) has changed. This would be difficult/expensive to detect; some
			// providers return metadata along with their "List" result, but others
			// (notably AWS S3) do not, so gocloud.dev's blob.Bucket doesn't expose
			// it in the list result. It would require a separate request per blob
			// to fetch. At least for now, we work around this by documenting it and
			// providing a "force" flag (to re-upload everything) and a "force" bool
			// per matcher (to re-upload all files in a matcher whose headers may have
			// changed).
			// Idea: extract a sample set of 1 file per extension + 1 file per matcher
			// and check those files?
			if force {
				upload = true
				reason = reasonForce
			} else if lf.Force() {
				upload = true
				reason = reasonForce
			} else if lf.UploadSize != remoteFile.Size {
				upload = true
				reason = reasonSize
			} else if len(remoteFile.MD5) == 0 {
				// This shouldn't happen unless the remote didn't give us an MD5 hash
				// from List, AND we failed to compute one by reading the remote file.
				// Default to considering the files different.
				upload = true
				reason = reasonMD5Missing
			} else if !bytes.Equal(lf.MD5(), remoteFile.MD5) {
				upload = true
				reason = reasonMD5Differs
			} else {
				// Nope! Leave upload = false.
			}
			found[path] = true
		} else {
			// The file doesn't exist in remote.
			upload = true
			reason = reasonNotFound
		}
		if upload {
			jww.DEBUG.Printf("%s needs to be uploaded: %v\n", path, reason)
			uploads = append(uploads, &fileToUpload{lf, reason})
		} else {
			jww.DEBUG.Printf("%s exists at target and does not need to be uploaded", path)
		}
	}

	// Remote files that weren't found locally should be deleted.
	for path := range remoteFiles {
		if !found[path] {
			deletes = append(deletes, path)
		}
	}
	return uploads, deletes
}

// applyOrdering returns an ordered slice of slices of uploads.
//
// The returned slice will have length len(ordering)+1.
//
// The subslice at index i, for i = 0 ... len(ordering)-1, will have all of the
// uploads whose Local.SlashPath matched the regex at ordering[i] (but not any
// previous ordering regex).
// The subslice at index len(ordering) will have the remaining uploads that
// didn't match any ordering regex.
//
// The subslices are sorted by Local.SlashPath.
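//
// For example (illustrative): with ordering = [`\.html$`] and uploads for
// a.css, b.html, and c.html, the result is:
//
//	[][]*fileToUpload{
//		{b.html, c.html}, // matched ordering[0]
//		{a.css},          // matched no ordering regex
//	}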
func applyOrdering(ordering []*regexp.Regexp, uploads []*fileToUpload) [][]*fileToUpload {
	// Sort the whole slice by Local.SlashPath first.
	sort.Slice(uploads, func(i, j int) bool { return uploads[i].Local.SlashPath < uploads[j].Local.SlashPath })

	retval := make([][]*fileToUpload, len(ordering)+1)
	for _, u := range uploads {
		matched := false
		for i, re := range ordering {
			if re.MatchString(u.Local.SlashPath) {
				retval[i] = append(retval[i], u)
				matched = true
				break
			}
		}
		if !matched {
			retval[len(ordering)] = append(retval[len(ordering)], u)
		}
	}
	return retval
}