Add support for copying files and folders.
This commit is contained in:
101
vendor/github.com/evanw/esbuild/internal/cache/cache.go
generated
vendored
Normal file
101
vendor/github.com/evanw/esbuild/internal/cache/cache.go
generated
vendored
Normal file
@ -0,0 +1,101 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/evanw/esbuild/internal/logger"
|
||||
"github.com/evanw/esbuild/internal/runtime"
|
||||
)
|
||||
|
||||
// This is a cache of the parsed contents of a set of files. The idea is to be
|
||||
// able to reuse the results of parsing between builds and make subsequent
|
||||
// builds faster by avoiding redundant parsing work. This only works if:
|
||||
//
|
||||
// * The AST information in the cache must be considered immutable. There is
|
||||
// no way to enforce this in Go, but please be disciplined about this. The
|
||||
// ASTs are shared in between builds. Any information that must be mutated
|
||||
// in the AST during a build must be done on a shallow clone of the data if
|
||||
// the mutation happens after parsing (i.e. a clone that clones everything
|
||||
// that will be mutated and shares only the parts that won't be mutated).
|
||||
//
|
||||
// * The information in the cache must not depend at all on the contents of
|
||||
// any file other than the file being cached. Invalidating an entry in the
|
||||
// cache does not also invalidate any entries that depend on that file, so
|
||||
// caching information that depends on other files can result in incorrect
|
||||
// results due to reusing stale data. For example, do not "bake in" some
|
||||
// value imported from another file.
|
||||
//
|
||||
// * Cached ASTs must only be reused if the parsing options are identical
|
||||
// between builds. For example, it would be bad if the AST parser depended
|
||||
// on options inherited from a nearby "package.json" file but those options
|
||||
// were not part of the cache key. Then the cached AST could incorrectly be
|
||||
// reused even if the contents of that "package.json" file have changed.
|
||||
//
|
||||
// CacheSet bundles all of the per-content-type caches that are shared
// between builds: source index assignment, file-system reads, and the
// parsed ASTs for CSS, JSON, and JS.
type CacheSet struct {
	SourceIndexCache SourceIndexCache // Stable source indexes per path+kind
	FSCache          FSCache          // File contents keyed by path
	CSSCache         CSSCache         // Cached CSS parse results
	JSONCache        JSONCache        // Cached JSON parse results
	JSCache          JSCache          // Cached JS parse results
}
|
||||
|
||||
func MakeCacheSet() *CacheSet {
|
||||
return &CacheSet{
|
||||
SourceIndexCache: SourceIndexCache{
|
||||
entries: make(map[sourceIndexKey]uint32),
|
||||
nextSourceIndex: runtime.SourceIndex + 1,
|
||||
},
|
||||
FSCache: FSCache{
|
||||
entries: make(map[string]*fsEntry),
|
||||
},
|
||||
CSSCache: CSSCache{
|
||||
entries: make(map[logger.Path]*cssCacheEntry),
|
||||
},
|
||||
JSONCache: JSONCache{
|
||||
entries: make(map[logger.Path]*jsonCacheEntry),
|
||||
},
|
||||
JSCache: JSCache{
|
||||
entries: make(map[logger.Path]*jsCacheEntry),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// SourceIndexCache hands out source indexes so that the same path and
// kind receive the same index across builds.
type SourceIndexCache struct {
	mutex           sync.Mutex                // Guards entries and nextSourceIndex
	entries         map[sourceIndexKey]uint32 // Path+kind -> assigned source index
	nextSourceIndex uint32                    // Next index to hand out
}
|
||||
|
||||
// SourceIndexKind distinguishes the different roles a single path can
// play, so that each role gets its own source index.
type SourceIndexKind uint8

const (
	// SourceIndexNormal is the ordinary use of a file.
	SourceIndexNormal SourceIndexKind = iota

	// SourceIndexJSStubForCSS is the JS stub counterpart of a CSS file
	// (exact semantics defined by callers of Get).
	SourceIndexJSStubForCSS
)

// sourceIndexKey is the cache key for SourceIndexCache: a path plus
// the role that path is being used in.
type sourceIndexKey struct {
	path logger.Path
	kind SourceIndexKind
}
|
||||
|
||||
func (c *SourceIndexCache) LenHint() uint32 {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
// Add some extra room at the end for a new file or two without reallocating
|
||||
const someExtraRoom = 16
|
||||
return c.nextSourceIndex + someExtraRoom
|
||||
}
|
||||
|
||||
func (c *SourceIndexCache) Get(path logger.Path, kind SourceIndexKind) uint32 {
|
||||
key := sourceIndexKey{path: path, kind: kind}
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
if sourceIndex, ok := c.entries[key]; ok {
|
||||
return sourceIndex
|
||||
}
|
||||
sourceIndex := c.nextSourceIndex
|
||||
c.nextSourceIndex++
|
||||
c.entries[key] = sourceIndex
|
||||
return sourceIndex
|
||||
}
|
190
vendor/github.com/evanw/esbuild/internal/cache/cache_ast.go
generated
vendored
Normal file
190
vendor/github.com/evanw/esbuild/internal/cache/cache_ast.go
generated
vendored
Normal file
@ -0,0 +1,190 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/evanw/esbuild/internal/css_ast"
|
||||
"github.com/evanw/esbuild/internal/css_parser"
|
||||
"github.com/evanw/esbuild/internal/js_ast"
|
||||
"github.com/evanw/esbuild/internal/js_parser"
|
||||
"github.com/evanw/esbuild/internal/logger"
|
||||
)
|
||||
|
||||
// This cache intends to avoid unnecessarily re-parsing files in subsequent
|
||||
// builds. For a given path, parsing can be avoided if the contents of the file
|
||||
// and the options for the parser are the same as last time. Even if the
|
||||
// contents of the file are the same, the options for the parser may have
|
||||
// changed if they depend on some other file ("package.json" for example).
|
||||
//
|
||||
// This cache checks if the file contents have changed even though we have
|
||||
// the ability to detect if a file has changed on the file system by reading
|
||||
// its metadata. First of all, if the file contents are cached then they should
|
||||
// be the same pointer, which makes the comparison trivial. Also we want to
|
||||
// cache the AST for plugins in the common case that the plugin output stays
|
||||
// the same.
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// CSS
|
||||
|
||||
// CSSCache caches parsed CSS ASTs keyed by source path.
type CSSCache struct {
	mutex   sync.Mutex                    // Guards entries
	entries map[logger.Path]*cssCacheEntry
}

// cssCacheEntry records everything needed to reuse a previous CSS
// parse: the exact source and options it was produced from, the
// resulting AST, and the log messages to replay on a cache hit.
type cssCacheEntry struct {
	source  logger.Source
	options css_parser.Options
	ast     css_ast.AST
	msgs    []logger.Msg
}
|
||||
|
||||
func (c *CSSCache) Parse(log logger.Log, source logger.Source, options css_parser.Options) css_ast.AST {
|
||||
// Check the cache
|
||||
entry := func() *cssCacheEntry {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
return c.entries[source.KeyPath]
|
||||
}()
|
||||
|
||||
// Cache hit
|
||||
if entry != nil && entry.source == source && entry.options == options {
|
||||
for _, msg := range entry.msgs {
|
||||
log.AddMsg(msg)
|
||||
}
|
||||
return entry.ast
|
||||
}
|
||||
|
||||
// Cache miss
|
||||
tempLog := logger.NewDeferLog(logger.DeferLogAll)
|
||||
ast := css_parser.Parse(tempLog, source, options)
|
||||
msgs := tempLog.Done()
|
||||
for _, msg := range msgs {
|
||||
log.AddMsg(msg)
|
||||
}
|
||||
|
||||
// Create the cache entry
|
||||
entry = &cssCacheEntry{
|
||||
source: source,
|
||||
options: options,
|
||||
ast: ast,
|
||||
msgs: msgs,
|
||||
}
|
||||
|
||||
// Save for next time
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.entries[source.KeyPath] = entry
|
||||
return ast
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// JSON
|
||||
|
||||
// JSONCache caches parsed JSON expressions keyed by source path.
type JSONCache struct {
	mutex   sync.Mutex                     // Guards entries
	entries map[logger.Path]*jsonCacheEntry
}

// jsonCacheEntry records everything needed to reuse a previous JSON
// parse: the exact source and options it was produced from, the parsed
// expression with its success flag, and the log messages to replay on
// a cache hit.
type jsonCacheEntry struct {
	source  logger.Source
	options js_parser.JSONOptions
	expr    js_ast.Expr
	ok      bool
	msgs    []logger.Msg
}
|
||||
|
||||
func (c *JSONCache) Parse(log logger.Log, source logger.Source, options js_parser.JSONOptions) (js_ast.Expr, bool) {
|
||||
// Check the cache
|
||||
entry := func() *jsonCacheEntry {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
return c.entries[source.KeyPath]
|
||||
}()
|
||||
|
||||
// Cache hit
|
||||
if entry != nil && entry.source == source && entry.options == options {
|
||||
for _, msg := range entry.msgs {
|
||||
log.AddMsg(msg)
|
||||
}
|
||||
return entry.expr, entry.ok
|
||||
}
|
||||
|
||||
// Cache miss
|
||||
tempLog := logger.NewDeferLog(logger.DeferLogAll)
|
||||
expr, ok := js_parser.ParseJSON(tempLog, source, options)
|
||||
msgs := tempLog.Done()
|
||||
for _, msg := range msgs {
|
||||
log.AddMsg(msg)
|
||||
}
|
||||
|
||||
// Create the cache entry
|
||||
entry = &jsonCacheEntry{
|
||||
source: source,
|
||||
options: options,
|
||||
expr: expr,
|
||||
ok: ok,
|
||||
msgs: msgs,
|
||||
}
|
||||
|
||||
// Save for next time
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.entries[source.KeyPath] = entry
|
||||
return expr, ok
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// JS
|
||||
|
||||
// JSCache caches parsed JS ASTs keyed by source path.
type JSCache struct {
	mutex   sync.Mutex                   // Guards entries
	entries map[logger.Path]*jsCacheEntry
}

// jsCacheEntry records everything needed to reuse a previous JS parse:
// the exact source and options it was produced from, the resulting AST
// with its success flag, and the log messages to replay on a cache hit.
type jsCacheEntry struct {
	source  logger.Source
	options js_parser.Options
	ast     js_ast.AST
	ok      bool
	msgs    []logger.Msg
}
|
||||
|
||||
func (c *JSCache) Parse(log logger.Log, source logger.Source, options js_parser.Options) (js_ast.AST, bool) {
|
||||
// Check the cache
|
||||
entry := func() *jsCacheEntry {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
return c.entries[source.KeyPath]
|
||||
}()
|
||||
|
||||
// Cache hit
|
||||
if entry != nil && entry.source == source && entry.options.Equal(&options) {
|
||||
for _, msg := range entry.msgs {
|
||||
log.AddMsg(msg)
|
||||
}
|
||||
return entry.ast, entry.ok
|
||||
}
|
||||
|
||||
// Cache miss
|
||||
tempLog := logger.NewDeferLog(logger.DeferLogAll)
|
||||
ast, ok := js_parser.Parse(tempLog, source, options)
|
||||
msgs := tempLog.Done()
|
||||
for _, msg := range msgs {
|
||||
log.AddMsg(msg)
|
||||
}
|
||||
|
||||
// Create the cache entry
|
||||
entry = &jsCacheEntry{
|
||||
source: source,
|
||||
options: options,
|
||||
ast: ast,
|
||||
ok: ok,
|
||||
msgs: msgs,
|
||||
}
|
||||
|
||||
// Save for next time
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.entries[source.KeyPath] = entry
|
||||
return ast, ok
|
||||
}
|
52
vendor/github.com/evanw/esbuild/internal/cache/cache_fs.go
generated
vendored
Normal file
52
vendor/github.com/evanw/esbuild/internal/cache/cache_fs.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/evanw/esbuild/internal/fs"
|
||||
)
|
||||
|
||||
// This cache uses information from the "stat" syscall to try to avoid re-
|
||||
// reading files from the file system during subsequent builds if the file
|
||||
// hasn't changed. The assumption is reading the file metadata is faster than
|
||||
// reading the file contents.
|
||||
|
||||
// FSCache caches file contents keyed by path, using the file's
// modification key to decide whether a cached read can be reused.
type FSCache struct {
	mutex   sync.Mutex          // Guards entries
	entries map[string]*fsEntry
}

// fsEntry is one cached file read.
type fsEntry struct {
	contents       string    // The file contents at the time of the read
	modKey         fs.ModKey // Modification key captured alongside the read
	isModKeyUsable bool      // False when the key could not be obtained; forces a re-read
}
|
||||
|
||||
func (c *FSCache) ReadFile(fs fs.FS, path string) (contents string, canonicalError error, originalError error) {
|
||||
entry := func() *fsEntry {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
return c.entries[path]
|
||||
}()
|
||||
|
||||
// If the file's modification key hasn't changed since it was cached, assume
|
||||
// the contents of the file are also the same and skip reading the file.
|
||||
modKey, modKeyErr := fs.ModKey(path)
|
||||
if entry != nil && entry.isModKeyUsable && modKeyErr == nil && entry.modKey == modKey {
|
||||
return entry.contents, nil, nil
|
||||
}
|
||||
|
||||
contents, err, originalError := fs.ReadFile(path)
|
||||
if err != nil {
|
||||
return "", err, originalError
|
||||
}
|
||||
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.entries[path] = &fsEntry{
|
||||
contents: contents,
|
||||
modKey: modKey,
|
||||
isModKeyUsable: modKeyErr == nil,
|
||||
}
|
||||
return contents, nil, nil
|
||||
}
|
Reference in New Issue
Block a user