resource_cache.go (7149B)
// Copyright 2019 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package resources

import (
	"encoding/json"
	"io"
	"path"
	"path/filepath"
	"regexp"
	"strings"
	"sync"

	"github.com/gohugoio/hugo/helpers"

	"github.com/gohugoio/hugo/hugofs/glob"

	"github.com/gohugoio/hugo/resources/resource"

	"github.com/gohugoio/hugo/cache/filecache"

	"github.com/BurntSushi/locker"
)

const (
	CACHE_CLEAR_ALL = "clear_all"
	CACHE_OTHER     = "other"
)

type ResourceCache struct {
	rs *Spec

	sync.RWMutex

	// Either resource.Resource or resource.Resources.
	cache map[string]any

	fileCache *filecache.Cache

	// Provides named resource locks.
	nlocker *locker.Locker
}

// ResourceCacheKey converts the filename into the format used in the resource
// cache.
func ResourceCacheKey(filename string) string {
	filename = filepath.ToSlash(filename)
	return path.Join(resourceKeyPartition(filename), filename)
}

func resourceKeyPartition(filename string) string {
	ext := strings.TrimPrefix(path.Ext(filepath.ToSlash(filename)), ".")
	if ext == "" {
		ext = CACHE_OTHER
	}
	return ext
}
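
// Illustrative sketch, not part of the original file: ResourceCacheKey
// prefixes the slash-normalized filename with its extension-based partition,
// so a stylesheet lands under the "scss" partition and an extension-less
// file under CACHE_OTHER.
func exampleResourceCacheKey() {
	_ = ResourceCacheKey(filepath.Join("styles", "main.scss")) // "scss/styles/main.scss"
	_ = ResourceCacheKey("README")                             // "other/README"
}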

// Commonly used aliases and directory names used for some types.
var extAliasKeywords = map[string][]string{
	"sass": {"scss"},
	"scss": {"sass"},
}

// ResourceKeyPartitions resolves an ordered slice of partitions used for
// resource cache invalidation.
//
// We use the first directory path element and the extension, so:
//	a/b.json => "a", "json"
//	b.json   => "json"
//
// For some of the extensions we will also map to closely related types,
// e.g. "scss" will also return "sass".
func ResourceKeyPartitions(filename string) []string {
	var partitions []string
	filename = glob.NormalizePath(filename)
	dir, name := path.Split(filename)
	ext := strings.TrimPrefix(path.Ext(filepath.ToSlash(name)), ".")

	if dir != "" {
		partitions = append(partitions, strings.Split(dir, "/")[0])
	}

	if ext != "" {
		partitions = append(partitions, ext)
	}

	if aliases, found := extAliasKeywords[ext]; found {
		partitions = append(partitions, aliases...)
	}

	if len(partitions) == 0 {
		partitions = []string{CACHE_OTHER}
	}

	return helpers.UniqueStringsSorted(partitions)
}
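
// Illustrative sketch, not part of the original file: the partitions derived
// from a few typical asset paths. The sass/scss aliasing pulls in the sibling
// extension, and the result is always sorted and de-duplicated.
func exampleResourceKeyPartitions() {
	_ = ResourceKeyPartitions("a/b.json")       // ["a", "json"]
	_ = ResourceKeyPartitions("b.json")         // ["json"]
	_ = ResourceKeyPartitions("scss/main.scss") // ["sass", "scss"]
	_ = ResourceKeyPartitions("LICENSE")        // ["other"]
}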

// ResourceKeyContainsAny returns whether the key is a member of any of the
// given partitions.
//
// This is used for resource cache invalidation.
func ResourceKeyContainsAny(key string, partitions []string) bool {
	parts := strings.Split(key, "/")
	for _, p1 := range partitions {
		for _, p2 := range parts {
			if p1 == p2 {
				return true
			}
		}
	}
	return false
}
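
// Illustrative sketch, not part of the original file: a cache key is matched
// against partitions by comparing its slash-separated parts, so invalidating
// the "scss" partition hits every key filed under it.
func exampleResourceKeyContainsAny() {
	key := ResourceCacheKey("styles/main.scss")       // "scss/styles/main.scss"
	_ = ResourceKeyContainsAny(key, []string{"scss"}) // true
	_ = ResourceKeyContainsAny(key, []string{"json"}) // false
}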

func newResourceCache(rs *Spec) *ResourceCache {
	return &ResourceCache{
		rs:        rs,
		fileCache: rs.FileCaches.AssetsCache(),
		cache:     make(map[string]any),
		nlocker:   locker.NewLocker(),
	}
}

func (c *ResourceCache) clear() {
	c.Lock()
	defer c.Unlock()

	c.cache = make(map[string]any)
	c.nlocker = locker.NewLocker()
}

func (c *ResourceCache) Contains(key string) bool {
	key = c.cleanKey(filepath.ToSlash(key))
	_, found := c.get(key)
	return found
}

func (c *ResourceCache) cleanKey(key string) string {
	return strings.TrimPrefix(path.Clean(strings.ToLower(key)), "/")
}

func (c *ResourceCache) get(key string) (any, bool) {
	c.RLock()
	defer c.RUnlock()
	r, found := c.cache[key]
	return r, found
}

func (c *ResourceCache) GetOrCreate(key string, f func() (resource.Resource, error)) (resource.Resource, error) {
	r, err := c.getOrCreate(key, func() (any, error) { return f() })
	if r == nil || err != nil {
		return nil, err
	}
	return r.(resource.Resource), nil
}

func (c *ResourceCache) GetOrCreateResources(key string, f func() (resource.Resources, error)) (resource.Resources, error) {
	r, err := c.getOrCreate(key, func() (any, error) { return f() })
	if r == nil || err != nil {
		return nil, err
	}
	return r.(resource.Resources), nil
}

func (c *ResourceCache) getOrCreate(key string, f func() (any, error)) (any, error) {
	key = c.cleanKey(key)
	// First check in-memory cache.
	r, found := c.get(key)
	if found {
		return r, nil
	}
	// This is a potentially long running operation, so get a named lock.
	c.nlocker.Lock(key)

	// Double check in-memory cache.
	r, found = c.get(key)
	if found {
		c.nlocker.Unlock(key)
		return r, nil
	}

	defer c.nlocker.Unlock(key)

	r, err := f()
	if err != nil {
		return nil, err
	}

	c.set(key, r)

	return r, nil
}
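
// Illustrative sketch, not part of the original file: GetOrCreate memoizes
// the result of a potentially expensive creator function. The creator runs
// only on a cache miss for the given key; concurrent callers for the same key
// wait on the named lock and then read the cached value. The createCSS
// parameter here is a hypothetical creator function.
func exampleGetOrCreate(c *ResourceCache, createCSS func() (resource.Resource, error)) (resource.Resource, error) {
	key := ResourceCacheKey("styles/main.css")
	return c.GetOrCreate(key, createCSS)
}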

func (c *ResourceCache) getFilenames(key string) (string, string) {
	filenameMeta := key + ".json"
	filenameContent := key + ".content"

	return filenameMeta, filenameContent
}

func (c *ResourceCache) getFromFile(key string) (filecache.ItemInfo, io.ReadCloser, transformedResourceMetadata, bool) {
	c.RLock()
	defer c.RUnlock()

	var meta transformedResourceMetadata
	filenameMeta, filenameContent := c.getFilenames(key)

	_, jsonContent, _ := c.fileCache.GetBytes(filenameMeta)
	if jsonContent == nil {
		return filecache.ItemInfo{}, nil, meta, false
	}

	if err := json.Unmarshal(jsonContent, &meta); err != nil {
		return filecache.ItemInfo{}, nil, meta, false
	}

	fi, rc, _ := c.fileCache.Get(filenameContent)

	return fi, rc, meta, rc != nil
}

// writeMeta writes the metadata to file and returns a writer for the content part.
func (c *ResourceCache) writeMeta(key string, meta transformedResourceMetadata) (filecache.ItemInfo, io.WriteCloser, error) {
	filenameMeta, filenameContent := c.getFilenames(key)
	raw, err := json.Marshal(meta)
	if err != nil {
		return filecache.ItemInfo{}, nil, err
	}

	_, fm, err := c.fileCache.WriteCloser(filenameMeta)
	if err != nil {
		return filecache.ItemInfo{}, nil, err
	}
	defer fm.Close()

	if _, err := fm.Write(raw); err != nil {
		return filecache.ItemInfo{}, nil, err
	}

	fi, fc, err := c.fileCache.WriteCloser(filenameContent)

	return fi, fc, err
}
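
// Illustrative sketch, not part of the original file: a transformed resource
// is persisted in the file cache as a "<key>.json" metadata entry plus a
// "<key>.content" entry for the bytes. writeMeta stores the former and hands
// back a writer for the latter; getFromFile reads the pair back. The metadata
// value is left at its zero value purely for illustration.
func exampleFileCacheRoundTrip(c *ResourceCache, key string, content []byte) error {
	var meta transformedResourceMetadata
	_, w, err := c.writeMeta(key, meta)
	if err != nil {
		return err
	}
	if _, err := w.Write(content); err != nil {
		w.Close()
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}
	if _, rc, _, found := c.getFromFile(key); found {
		rc.Close()
	}
	return nil
}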

func (c *ResourceCache) set(key string, r any) {
	c.Lock()
	defer c.Unlock()
	c.cache[key] = r
}

func (c *ResourceCache) DeletePartitions(partitions ...string) {
	partitionsSet := map[string]bool{
		// Always clear out the resources not matching any partition.
		CACHE_OTHER: true,
	}
	for _, p := range partitions {
		partitionsSet[p] = true
	}

	if partitionsSet[CACHE_CLEAR_ALL] {
		c.clear()
		return
	}

	c.Lock()
	defer c.Unlock()

	for k := range c.cache {
		clear := false
		for p := range partitionsSet {
			if strings.Contains(k, p) {
				// There will be some false positives, but that's fine.
				clear = true
				break
			}
		}

		if clear {
			delete(c.cache, k)
		}
	}
}
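
// Illustrative sketch, not part of the original file: partition invalidation
// is a plain substring match against the cache key, trading a few false
// positives for a cheap check.
func exampleDeletePartitions(c *ResourceCache) {
	c.set(c.cleanKey("scss/styles/main.scss"), nil)
	c.set(c.cleanKey("json/data/books.json"), nil)

	// Drops the scss key (plus anything under the implicit "other" partition),
	// keeps the json key.
	c.DeletePartitions("scss")
}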

func (c *ResourceCache) DeleteMatches(re *regexp.Regexp) {
	c.Lock()
	defer c.Unlock()

	for k := range c.cache {
		if re.MatchString(k) {
			delete(c.cache, k)
		}
	}
}