Add support for copying files and folders.

This commit is contained in:
2021-12-19 14:31:57 +01:00
parent 161cb79b88
commit 311339685c
450 changed files with 232338 additions and 3 deletions

View File

@ -0,0 +1,256 @@
package css_parser
import (
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// commaToken returns a "," separator token. Unless the parser is
// configured to strip whitespace from the output, the token is flagged
// to be followed by a space when printed.
func (p *parser) commaToken() css_ast.Token {
	token := css_ast.Token{
		Kind: css_lexer.TComma,
		Text: ",",
	}
	if p.options.RemoveWhitespace {
		return token
	}
	token.Whitespace = css_ast.WhitespaceAfter
	return token
}
// expandTokenQuad expands 1 to 4 tokens into the full four-token form
// used by CSS box shorthands ("margin: a b" becomes top/right/bottom/left).
// It fails (ok == false) unless every token is numeric or equals the
// optional allowedIdent keyword (e.g. "auto"), since tokens such as
// "var()" cannot safely be duplicated or reordered.
func expandTokenQuad(tokens []css_ast.Token, allowedIdent string) (result [4]css_ast.Token, ok bool) {
	if len(tokens) < 1 || len(tokens) > 4 {
		return
	}

	// Don't do this if we encounter any unexpected tokens such as "var()"
	for _, t := range tokens {
		if t.Kind.IsNumeric() {
			continue
		}
		if t.Kind == css_lexer.TIdent && allowedIdent != "" && t.Text == allowedIdent {
			continue
		}
		return
	}

	// Fill in missing sides using the CSS shorthand defaulting rules:
	// right defaults to top, bottom to top, and left to right.
	switch len(tokens) {
	case 1:
		result = [4]css_ast.Token{tokens[0], tokens[0], tokens[0], tokens[0]}
	case 2:
		result = [4]css_ast.Token{tokens[0], tokens[1], tokens[0], tokens[1]}
	case 3:
		result = [4]css_ast.Token{tokens[0], tokens[1], tokens[2], tokens[1]}
	default:
		result = [4]css_ast.Token{tokens[0], tokens[1], tokens[2], tokens[3]}
	}
	ok = true
	return
}
// compactTokenQuad returns the shortest token list (1 to 4 tokens) that a
// CSS box shorthand can use to represent the four given side values, then
// assigns whitespace flags suitable for printing the result.
func compactTokenQuad(a css_ast.Token, b css_ast.Token, c css_ast.Token, d css_ast.Token, removeWhitespace bool) []css_ast.Token {
	// Apply the shorthand defaulting rules in reverse: drop trailing
	// tokens that the remaining tokens already imply.
	n := 4
	if d.EqualIgnoringWhitespace(b) {
		switch {
		case !c.EqualIgnoringWhitespace(a):
			n = 3
		case !b.EqualIgnoringWhitespace(a):
			n = 2
		default:
			n = 1
		}
	}
	tokens := []css_ast.Token{a, b, c, d}[:n]

	// Separate the tokens with spaces; the very first token only gets a
	// leading space when whitespace isn't being stripped
	for i := range tokens {
		var flags css_ast.WhitespaceFlags
		if i > 0 || !removeWhitespace {
			flags |= css_ast.WhitespaceBefore
		}
		if i != len(tokens)-1 {
			flags |= css_ast.WhitespaceAfter
		}
		tokens[i].Whitespace = flags
	}
	return tokens
}
// processDeclarations post-processes the declarations inside one rule
// body. Color values are lowered for older browsers, and when minifying
// ("MangleSyntax") shorthand properties such as "margin", "padding",
// "inset", and "border-radius" are merged with their longhand forms into
// the smallest equivalent set of declarations. Rules that get merged away
// are cleared to the zero css_ast.Rule{} value and compacted out at the
// end, so the returned slice may be shorter than the input.
func (p *parser) processDeclarations(rules []css_ast.Rule) []css_ast.Rule {
	// Trackers carry state across sibling declarations so longhand rules
	// can be folded into a single shorthand rule
	margin := boxTracker{key: css_ast.DMargin, keyText: "margin", allowAuto: true}
	padding := boxTracker{key: css_ast.DPadding, keyText: "padding", allowAuto: false}
	inset := boxTracker{key: css_ast.DInset, keyText: "inset", allowAuto: true}
	borderRadius := borderRadiusTracker{}

	for i, rule := range rules {
		decl, ok := rule.Data.(*css_ast.RDeclaration)
		if !ok {
			continue
		}

		switch decl.Key {
		// Properties whose single value is a color: lower it for older
		// browsers and (when minifying) shorten it
		case css_ast.DBackgroundColor,
			css_ast.DBorderBlockEndColor,
			css_ast.DBorderBlockStartColor,
			css_ast.DBorderBottomColor,
			css_ast.DBorderColor,
			css_ast.DBorderInlineEndColor,
			css_ast.DBorderInlineStartColor,
			css_ast.DBorderLeftColor,
			css_ast.DBorderRightColor,
			css_ast.DBorderTopColor,
			css_ast.DCaretColor,
			css_ast.DColor,
			css_ast.DColumnRuleColor,
			css_ast.DFill,
			css_ast.DFloodColor,
			css_ast.DLightingColor,
			css_ast.DOutlineColor,
			css_ast.DStopColor,
			css_ast.DStroke,
			css_ast.DTextDecorationColor,
			css_ast.DTextEmphasisColor:

			// Only a single-token value can be a plain color; multi-token
			// values (e.g. with "var()") are left alone
			if len(decl.Value) == 1 {
				decl.Value[0] = p.lowerColor(decl.Value[0])

				if p.options.MangleSyntax {
					t := decl.Value[0]
					if hex, ok := parseColor(t); ok {
						decl.Value[0] = p.mangleColor(t, hex)
					}
				}
			}

		case css_ast.DFont:
			if p.options.MangleSyntax {
				decl.Value = p.mangleFont(decl.Value)
			}

		case css_ast.DFontFamily:
			if p.options.MangleSyntax {
				if value, ok := p.mangleFontFamily(decl.Value); ok {
					decl.Value = value
				}
			}

		case css_ast.DFontWeight:
			if len(decl.Value) == 1 && p.options.MangleSyntax {
				decl.Value[0] = p.mangleFontWeight(decl.Value[0])
			}

		case css_ast.DTransform:
			if p.options.MangleSyntax {
				decl.Value = p.mangleTransforms(decl.Value)
			}

		case css_ast.DBoxShadow:
			if p.options.MangleSyntax {
				decl.Value = p.mangleBoxShadows(decl.Value)
			}

		// Margin
		case css_ast.DMargin:
			if p.options.MangleSyntax {
				margin.mangleSides(rules, decl, i, p.options.RemoveWhitespace)
			}
		case css_ast.DMarginTop:
			if p.options.MangleSyntax {
				margin.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxTop)
			}
		case css_ast.DMarginRight:
			if p.options.MangleSyntax {
				margin.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxRight)
			}
		case css_ast.DMarginBottom:
			if p.options.MangleSyntax {
				margin.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxBottom)
			}
		case css_ast.DMarginLeft:
			if p.options.MangleSyntax {
				margin.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxLeft)
			}

		// Padding
		case css_ast.DPadding:
			if p.options.MangleSyntax {
				padding.mangleSides(rules, decl, i, p.options.RemoveWhitespace)
			}
		case css_ast.DPaddingTop:
			if p.options.MangleSyntax {
				padding.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxTop)
			}
		case css_ast.DPaddingRight:
			if p.options.MangleSyntax {
				padding.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxRight)
			}
		case css_ast.DPaddingBottom:
			if p.options.MangleSyntax {
				padding.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxBottom)
			}
		case css_ast.DPaddingLeft:
			if p.options.MangleSyntax {
				padding.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxLeft)
			}

		// Inset: only merged when the target browsers support the "inset"
		// shorthand property
		case css_ast.DInset:
			if !p.options.UnsupportedCSSFeatures.Has(compat.InsetProperty) && p.options.MangleSyntax {
				inset.mangleSides(rules, decl, i, p.options.RemoveWhitespace)
			}
		case css_ast.DTop:
			if !p.options.UnsupportedCSSFeatures.Has(compat.InsetProperty) && p.options.MangleSyntax {
				inset.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxTop)
			}
		case css_ast.DRight:
			if !p.options.UnsupportedCSSFeatures.Has(compat.InsetProperty) && p.options.MangleSyntax {
				inset.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxRight)
			}
		case css_ast.DBottom:
			if !p.options.UnsupportedCSSFeatures.Has(compat.InsetProperty) && p.options.MangleSyntax {
				inset.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxBottom)
			}
		case css_ast.DLeft:
			if !p.options.UnsupportedCSSFeatures.Has(compat.InsetProperty) && p.options.MangleSyntax {
				inset.mangleSide(rules, decl, i, p.options.RemoveWhitespace, boxLeft)
			}

		// Border radius
		case css_ast.DBorderRadius:
			if p.options.MangleSyntax {
				borderRadius.mangleCorners(rules, decl, i, p.options.RemoveWhitespace)
			}
		case css_ast.DBorderTopLeftRadius:
			if p.options.MangleSyntax {
				borderRadius.mangleCorner(rules, decl, i, p.options.RemoveWhitespace, borderRadiusTopLeft)
			}
		case css_ast.DBorderTopRightRadius:
			if p.options.MangleSyntax {
				borderRadius.mangleCorner(rules, decl, i, p.options.RemoveWhitespace, borderRadiusTopRight)
			}
		case css_ast.DBorderBottomRightRadius:
			if p.options.MangleSyntax {
				borderRadius.mangleCorner(rules, decl, i, p.options.RemoveWhitespace, borderRadiusBottomRight)
			}
		case css_ast.DBorderBottomLeftRadius:
			if p.options.MangleSyntax {
				borderRadius.mangleCorner(rules, decl, i, p.options.RemoveWhitespace, borderRadiusBottomLeft)
			}
		}
	}

	// Compact removed rules (slots cleared to css_ast.Rule{} by the
	// trackers above are filtered out in place)
	if p.options.MangleSyntax {
		end := 0
		for _, rule := range rules {
			if rule.Data != nil {
				rules[end] = rule
				end++
			}
		}
		rules = rules[:end]
	}

	return rules
}

View File

@ -0,0 +1,213 @@
package css_parser
import (
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// Indices into the "corners" array of borderRadiusTracker, in the order
// the "border-radius" shorthand syntax lists them (clockwise from the
// top-left corner).
const (
	borderRadiusTopLeft = iota
	borderRadiusTopRight
	borderRadiusBottomRight
	borderRadiusBottomLeft
)
// borderRadiusCorner records the value most recently seen for one corner
// of "border-radius". A corner has two radii: "firstToken" (before any
// "/" in the shorthand) and "secondToken" (after the "/"; equal to
// firstToken when no "/" was present).
type borderRadiusCorner struct {
	firstToken    css_ast.Token
	secondToken   css_ast.Token
	unitSafety    unitSafetyTracker
	ruleIndex     uint32 // The index of the originating rule in the rules array
	wasSingleRule bool   // True if the originating rule was just for this side
}

// borderRadiusTracker accumulates the four corners of "border-radius"
// across multiple sibling declarations so they can be merged into a
// single shorthand rule.
type borderRadiusTracker struct {
	corners   [4]borderRadiusCorner
	important bool // True if all active rules were flagged as "!important"
}
// updateCorner records a new value for one corner. If the rule that
// previously supplied this corner is now fully overridden (and both old
// and new values use safe units), that earlier rule is removed by
// clearing its slot in the rules array.
func (borderRadius *borderRadiusTracker) updateCorner(rules []css_ast.Rule, corner int, new borderRadiusCorner) {
	if old := borderRadius.corners[corner]; old.firstToken.Kind != css_lexer.TEndOfFile &&
		(!new.wasSingleRule || old.wasSingleRule) &&
		old.unitSafety.status == unitSafe && new.unitSafety.status == unitSafe {
		rules[old.ruleIndex] = css_ast.Rule{}
	}
	borderRadius.corners[corner] = new
}
// mangleCorners processes a "border-radius" shorthand declaration. The
// value may contain a single "/" separating horizontal from vertical
// radii. All four corners are recorded in the tracker, any earlier
// per-corner rules that are now redundant are removed, and the rules are
// compacted into one minimal "border-radius" declaration when possible.
// Any pattern this code doesn't understand resets the tracker instead.
func (borderRadius *borderRadiusTracker) mangleCorners(rules []css_ast.Rule, decl *css_ast.RDeclaration, index int, removeWhitespace bool) {
	// Reset if we see a change in the "!important" flag
	if borderRadius.important != decl.Important {
		borderRadius.corners = [4]borderRadiusCorner{}
		borderRadius.important = decl.Important
	}

	tokens := decl.Value
	beforeSplit := len(tokens)
	afterSplit := len(tokens)

	// Search for the single slash if present
	for i, t := range tokens {
		if t.Kind == css_lexer.TDelimSlash {
			if beforeSplit == len(tokens) {
				beforeSplit = i
				afterSplit = i + 1
			} else {
				// Multiple slashes are an error
				borderRadius.corners = [4]borderRadiusCorner{}
				return
			}
		}
	}

	// Use a single tracker for the whole rule
	unitSafety := unitSafetyTracker{}
	for _, t := range tokens[:beforeSplit] {
		unitSafety.includeUnitOf(t)
	}
	for _, t := range tokens[afterSplit:] {
		unitSafety.includeUnitOf(t)
	}

	firstRadii, firstRadiiOk := expandTokenQuad(tokens[:beforeSplit], "")
	lastRadii, lastRadiiOk := expandTokenQuad(tokens[afterSplit:], "")

	// Stop now if the pattern wasn't matched. (When there is no slash,
	// beforeSplit == afterSplit, so lastRadiiOk isn't required.)
	if !firstRadiiOk || (beforeSplit < afterSplit && !lastRadiiOk) {
		borderRadius.corners = [4]borderRadiusCorner{}
		return
	}

	// Handle the first radii: with no slash, a corner's second radius
	// equals its first radius
	for corner, t := range firstRadii {
		if unitSafety.status == unitSafe {
			t.TurnLengthIntoNumberIfZero()
		}
		borderRadius.updateCorner(rules, corner, borderRadiusCorner{
			firstToken:  t,
			secondToken: t,
			unitSafety:  unitSafety,
			ruleIndex:   uint32(index),
		})
	}

	// Handle the last radii (overwrites the second token set above)
	if lastRadiiOk {
		for corner, t := range lastRadii {
			if unitSafety.status == unitSafe {
				t.TurnLengthIntoNumberIfZero()
			}
			borderRadius.corners[corner].secondToken = t
		}
	}

	// Success
	borderRadius.compactRules(rules, decl.KeyRange, removeWhitespace)
}
// mangleCorner processes a single-corner declaration such as
// "border-top-left-radius". A corner value is one or two numeric tokens
// (horizontal radius, optional vertical radius). Anything else resets
// the tracker.
func (borderRadius *borderRadiusTracker) mangleCorner(rules []css_ast.Rule, decl *css_ast.RDeclaration, index int, removeWhitespace bool, corner int) {
	// Reset if we see a change in the "!important" flag
	if borderRadius.important != decl.Important {
		borderRadius.corners = [4]borderRadiusCorner{}
		borderRadius.important = decl.Important
	}

	if tokens := decl.Value; (len(tokens) == 1 && tokens[0].Kind.IsNumeric()) ||
		(len(tokens) == 2 && tokens[0].Kind.IsNumeric() && tokens[1].Kind.IsNumeric()) {
		firstToken := tokens[0]
		secondToken := firstToken
		if len(tokens) == 2 {
			secondToken = tokens[1]
		}

		// Check to see if these units are safe to use in every browser
		unitSafety := unitSafetyTracker{}
		unitSafety.includeUnitOf(firstToken)
		unitSafety.includeUnitOf(secondToken)

		// Only collapse "0unit" into "0" if the unit is safe
		if unitSafety.status == unitSafe && firstToken.TurnLengthIntoNumberIfZero() {
			tokens[0] = firstToken
		}
		if len(tokens) == 2 {
			if unitSafety.status == unitSafe && secondToken.TurnLengthIntoNumberIfZero() {
				tokens[1] = secondToken
			}

			// If both tokens are equal, merge them into one
			if firstToken.EqualIgnoringWhitespace(secondToken) {
				tokens[0].Whitespace &= ^css_ast.WhitespaceAfter
				decl.Value = tokens[:1]
			}
		}

		borderRadius.updateCorner(rules, corner, borderRadiusCorner{
			firstToken:    firstToken,
			secondToken:   secondToken,
			unitSafety:    unitSafety,
			ruleIndex:     uint32(index),
			wasSingleRule: true,
		})
		borderRadius.compactRules(rules, decl.KeyRange, removeWhitespace)
	} else {
		borderRadius.corners = [4]borderRadiusCorner{}
	}
}
// compactRules replaces the individual corner rules with one combined
// "border-radius" shorthand, but only when all four corners are present
// and their units are mutually safe to merge. The combined rule is
// written where the last contributing rule was; the others are cleared.
func (borderRadius *borderRadiusTracker) compactRules(rules []css_ast.Rule, keyRange logger.Range, removeWhitespace bool) {
	// All tokens must be present
	if eof := css_lexer.TEndOfFile; borderRadius.corners[0].firstToken.Kind == eof || borderRadius.corners[1].firstToken.Kind == eof ||
		borderRadius.corners[2].firstToken.Kind == eof || borderRadius.corners[3].firstToken.Kind == eof {
		return
	}

	// All tokens must have the same unit
	for _, side := range borderRadius.corners[1:] {
		if !side.unitSafety.isSafeWith(borderRadius.corners[0].unitSafety) {
			return
		}
	}

	// Generate the most minimal representation
	tokens := compactTokenQuad(
		borderRadius.corners[0].firstToken,
		borderRadius.corners[1].firstToken,
		borderRadius.corners[2].firstToken,
		borderRadius.corners[3].firstToken,
		removeWhitespace,
	)
	secondTokens := compactTokenQuad(
		borderRadius.corners[0].secondToken,
		borderRadius.corners[1].secondToken,
		borderRadius.corners[2].secondToken,
		borderRadius.corners[3].secondToken,
		removeWhitespace,
	)

	// The "first / second" form is only needed when the two radius sets
	// differ; otherwise the first set alone is equivalent
	if !css_ast.TokensEqualIgnoringWhitespace(tokens, secondTokens) {
		var whitespace css_ast.WhitespaceFlags
		if !removeWhitespace {
			whitespace = css_ast.WhitespaceBefore | css_ast.WhitespaceAfter
		}
		tokens = append(tokens, css_ast.Token{
			Kind:       css_lexer.TDelimSlash,
			Text:       "/",
			Whitespace: whitespace,
		})
		tokens = append(tokens, secondTokens...)
	}

	// Remove all of the existing declarations
	rules[borderRadius.corners[0].ruleIndex] = css_ast.Rule{}
	rules[borderRadius.corners[1].ruleIndex] = css_ast.Rule{}
	rules[borderRadius.corners[2].ruleIndex] = css_ast.Rule{}
	rules[borderRadius.corners[3].ruleIndex] = css_ast.Rule{}

	// Insert the combined declaration where the last rule was
	rules[borderRadius.corners[3].ruleIndex].Data = &css_ast.RDeclaration{
		Key:       css_ast.DBorderRadius,
		KeyText:   "border-radius",
		Value:     tokens,
		KeyRange:  keyRange,
		Important: borderRadius.important,
	}
}

View File

@ -0,0 +1,198 @@
package css_parser
import (
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// Indices into the "sides" array of boxTracker, in the order the CSS box
// shorthand syntax lists them (clockwise from the top).
const (
	boxTop = iota
	boxRight
	boxBottom
	boxLeft
)
// boxSide records the value most recently seen for one side of a box
// shorthand ("margin", "padding", or "inset").
type boxSide struct {
	token         css_ast.Token
	unitSafety    unitSafetyTracker
	ruleIndex     uint32 // The index of the originating rule in the rules array
	wasSingleRule bool   // True if the originating rule was just for this side
}

// boxTracker accumulates the four sides of a box shorthand across
// multiple sibling declarations so they can be merged into one rule.
// One tracker instance exists per property ("margin", "padding", "inset").
type boxTracker struct {
	key       css_ast.D // The declaration key for the combined rule
	keyText   string    // The property name for the combined rule, e.g. "margin"
	allowAuto bool      // If true, allow the "auto" keyword
	sides     [4]boxSide
	important bool // True if all active rules were flagged as "!important"
}
// unitSafetyStatus classifies how safely a set of tokens' units can be
// merged into a single shorthand rule.
type unitSafetyStatus uint8

const (
	unitSafe         unitSafetyStatus = iota // "margin: 0 1px 2cm 3%;"
	unitUnsafeSingle                         // "margin: 0 1vw 2vw 3vw;"
	unitUnsafeMixed                          // "margin: 0 1vw 2vh 3ch;"
)
// We can only compact rules together if they have the same unit safety level.
// We want to avoid a situation where the browser treats some of the original
// rules as valid and others as invalid.
//
// Safe:
//   top: 1px; left: 0; bottom: 1px; right: 0;
//   top: 1Q; left: 2Q; bottom: 3Q; right: 4Q;
//
// Unsafe:
//   top: 1vh; left: 2vw; bottom: 3vh; right: 4vw;
//   top: 1Q; left: 2Q; bottom: 3Q; right: 0;
//   inset: 1Q 0 0 0; top: 0;
//
type unitSafetyTracker struct {
	status unitSafetyStatus
	unit   string // The shared unit when status == unitUnsafeSingle
}
// isSafeWith reports whether two trackers have compatible unit safety so
// their rules may be merged. Mixed-unit values are never safe, and
// "single unsafe unit" values are only safe with the same unit.
func (a unitSafetyTracker) isSafeWith(b unitSafetyTracker) bool {
	if a.status != b.status || a.status == unitUnsafeMixed {
		return false
	}
	return a.status != unitUnsafeSingle || a.unit == b.unit
}
// includeUnitOf folds one token's unit into the tracker. The number zero
// and percentages are always safe. A dimension with a browser-safe
// length unit stays safe; other units downgrade the tracker to
// unitUnsafeSingle (all tokens so far share that one unit) and anything
// else — including a second distinct unsafe unit — to unitUnsafeMixed.
func (t *unitSafetyTracker) includeUnitOf(token css_ast.Token) {
	switch token.Kind {
	case css_lexer.TNumber:
		if token.Text == "0" {
			return
		}

	case css_lexer.TPercentage:
		return

	case css_lexer.TDimension:
		if token.DimensionUnitIsSafeLength() {
			return
		} else if unit := token.DimensionUnit(); t.status == unitSafe {
			t.status = unitUnsafeSingle
			t.unit = unit
			return
		} else if t.status == unitUnsafeSingle && t.unit == unit {
			return
		}
	}

	// Any case that didn't return above makes compaction unsafe
	t.status = unitUnsafeMixed
}
// updateSide records a new value for one side. If the rule that
// previously supplied this side is now fully overridden (and both old
// and new values use safe units), that earlier rule is removed by
// clearing its slot in the rules array.
func (box *boxTracker) updateSide(rules []css_ast.Rule, side int, new boxSide) {
	if old := box.sides[side]; old.token.Kind != css_lexer.TEndOfFile &&
		(!new.wasSingleRule || old.wasSingleRule) &&
		old.unitSafety.status == unitSafe && new.unitSafety.status == unitSafe {
		rules[old.ruleIndex] = css_ast.Rule{}
	}
	box.sides[side] = new
}
// mangleSides processes a box shorthand declaration ("margin: a b c d").
// All four sides are recorded in the tracker, any earlier per-side rules
// that are now redundant are removed, and the rules are compacted into a
// single minimal declaration when possible. Values this code doesn't
// understand reset the tracker instead.
func (box *boxTracker) mangleSides(rules []css_ast.Rule, decl *css_ast.RDeclaration, index int, removeWhitespace bool) {
	// Reset if we see a change in the "!important" flag
	if box.important != decl.Important {
		box.sides = [4]boxSide{}
		box.important = decl.Important
	}

	allowedIdent := ""
	if box.allowAuto {
		allowedIdent = "auto"
	}
	if quad, ok := expandTokenQuad(decl.Value, allowedIdent); ok {
		// Use a single tracker for the whole rule ("auto" keywords are
		// skipped because they carry no unit)
		unitSafety := unitSafetyTracker{}
		for _, t := range quad {
			if !box.allowAuto || t.Kind.IsNumeric() {
				unitSafety.includeUnitOf(t)
			}
		}
		for side, t := range quad {
			if unitSafety.status == unitSafe {
				t.TurnLengthIntoNumberIfZero()
			}
			box.updateSide(rules, side, boxSide{
				token:      t,
				ruleIndex:  uint32(index),
				unitSafety: unitSafety,
			})
		}
		box.compactRules(rules, decl.KeyRange, removeWhitespace)
	} else {
		box.sides = [4]boxSide{}
	}
}
// mangleSide processes a single-side declaration such as "margin-top".
// The value must be a single numeric token (or "auto" where allowed);
// anything else resets the tracker.
func (box *boxTracker) mangleSide(rules []css_ast.Rule, decl *css_ast.RDeclaration, index int, removeWhitespace bool, side int) {
	// Reset if we see a change in the "!important" flag
	if box.important != decl.Important {
		box.sides = [4]boxSide{}
		box.important = decl.Important
	}

	if tokens := decl.Value; len(tokens) == 1 {
		if t := tokens[0]; t.Kind.IsNumeric() || (t.Kind == css_lexer.TIdent && box.allowAuto && t.Text == "auto") {
			// "auto" carries no unit, so it doesn't affect unit safety
			unitSafety := unitSafetyTracker{}
			if !box.allowAuto || t.Kind.IsNumeric() {
				unitSafety.includeUnitOf(t)
			}

			// Only collapse "0unit" into "0" if the unit is safe
			if unitSafety.status == unitSafe && t.TurnLengthIntoNumberIfZero() {
				tokens[0] = t
			}

			box.updateSide(rules, side, boxSide{
				token:         t,
				ruleIndex:     uint32(index),
				wasSingleRule: true,
				unitSafety:    unitSafety,
			})
			box.compactRules(rules, decl.KeyRange, removeWhitespace)
			return
		}
	}

	// Any unrecognized value resets the tracker
	box.sides = [4]boxSide{}
}
// compactRules replaces the individual side rules with one combined
// shorthand rule, but only when all four sides are present and their
// units are mutually safe to merge. The combined rule is written where
// the last contributing rule was; the others are cleared.
func (box *boxTracker) compactRules(rules []css_ast.Rule, keyRange logger.Range, removeWhitespace bool) {
	// All tokens must be present
	if eof := css_lexer.TEndOfFile; box.sides[0].token.Kind == eof || box.sides[1].token.Kind == eof ||
		box.sides[2].token.Kind == eof || box.sides[3].token.Kind == eof {
		return
	}

	// All tokens must have the same unit
	for _, side := range box.sides[1:] {
		if !side.unitSafety.isSafeWith(box.sides[0].unitSafety) {
			return
		}
	}

	// Generate the most minimal representation
	tokens := compactTokenQuad(
		box.sides[0].token,
		box.sides[1].token,
		box.sides[2].token,
		box.sides[3].token,
		removeWhitespace,
	)

	// Remove all of the existing declarations
	rules[box.sides[0].ruleIndex] = css_ast.Rule{}
	rules[box.sides[1].ruleIndex] = css_ast.Rule{}
	rules[box.sides[2].ruleIndex] = css_ast.Rule{}
	rules[box.sides[3].ruleIndex] = css_ast.Rule{}

	// Insert the combined declaration where the last rule was
	rules[box.sides[3].ruleIndex].Data = &css_ast.RDeclaration{
		Key:       box.key,
		KeyText:   box.keyText,
		Value:     tokens,
		KeyRange:  keyRange,
		Important: box.important,
	}
}

View File

@ -0,0 +1,103 @@
package css_parser
import (
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// mangleBoxShadow minifies the tokens of a single shadow (one
// comma-separated entry of a "box-shadow" value): "0px" lengths become
// "0", colors are shortened, trailing zero blur/spread radii are
// dropped, and whitespace flags are recomputed. Shadows containing
// unexpected tokens (e.g. "var()") only receive the per-token changes.
func (p *parser) mangleBoxShadow(tokens []css_ast.Token) []css_ast.Token {
	insetCount := 0
	colorCount := 0
	numbersBegin := 0
	numbersCount := 0
	numbersDone := false
	foundUnexpectedToken := false

	for i, t := range tokens {
		if t.Kind == css_lexer.TNumber || t.Kind == css_lexer.TDimension {
			if numbersDone {
				// Track if we found a non-number in between two numbers
				foundUnexpectedToken = true
			}
			if t.TurnLengthIntoNumberIfZero() {
				// "0px" => "0"
				tokens[i] = t
			}
			if numbersCount == 0 {
				// Track the index of the first number
				numbersBegin = i
			}
			numbersCount++
		} else {
			if numbersCount != 0 {
				// Track when we find a non-number after a number
				numbersDone = true
			}
			if hex, ok := parseColor(t); ok {
				colorCount++
				tokens[i] = p.mangleColor(t, hex)
			} else if t.Kind == css_lexer.TIdent && t.Text == "inset" {
				insetCount++
			} else {
				// Track if we found a token other than a number, a color, or "inset"
				foundUnexpectedToken = true
			}
		}
	}

	// If everything looks like a valid rule, trim trailing zeros off the numbers.
	// There are three valid configurations of numbers:
	//
	//   offset-x | offset-y
	//   offset-x | offset-y | blur-radius
	//   offset-x | offset-y | blur-radius | spread-radius
	//
	// If omitted, blur-radius and spread-radius are implied to be zero.
	if insetCount <= 1 && colorCount <= 1 && numbersCount > 2 && numbersCount <= 4 && !foundUnexpectedToken {
		numbersEnd := numbersBegin + numbersCount
		for numbersCount > 2 && tokens[numbersBegin+numbersCount-1].IsZero() {
			numbersCount--
		}
		// Splice out the dropped trailing zeros, keeping any tokens that
		// followed the number run (e.g. a trailing color)
		tokens = append(tokens[:numbersBegin+numbersCount], tokens[numbersEnd:]...)
	}

	// Set the whitespace flags
	for i := range tokens {
		var whitespace css_ast.WhitespaceFlags
		if i > 0 || !p.options.RemoveWhitespace {
			whitespace |= css_ast.WhitespaceBefore
		}
		if i+1 < len(tokens) {
			whitespace |= css_ast.WhitespaceAfter
		}
		tokens[i].Whitespace = whitespace
	}
	return tokens
}
// mangleBoxShadows splits a comma-separated "box-shadow" value into
// individual shadows, mangles each one, and packs the results (and the
// separating commas) back into the token slice in place.
func (p *parser) mangleBoxShadows(tokens []css_ast.Token) []css_ast.Token {
	end := 0
	for start := 0; start < len(tokens); {
		// Scan ahead to the comma separating this shadow from the next
		// one, or to the end of the token list
		split := start
		for split < len(tokens) && tokens[split].Kind != css_lexer.TComma {
			split++
		}

		// Mangle this individual shadow and compact it into the output
		end += copy(tokens[end:], p.mangleBoxShadow(tokens[start:split]))

		// Keep the comma token itself, if there was one
		if split < len(tokens) {
			tokens[end] = tokens[split]
			end++
			split++
		}
		start = split
	}
	return tokens[:end]
}

View File

@ -0,0 +1,669 @@
package css_parser
import (
"fmt"
"math"
"strconv"
"strings"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// These names are shorter than their hex codes. When minifying, a color
// whose 0xRRGGBBAA value appears here is printed as the name instead.
var shortColorName = map[uint32]string{
	0x000080ff: "navy",
	0x008000ff: "green",
	0x008080ff: "teal",
	0x4b0082ff: "indigo",
	0x800000ff: "maroon",
	0x800080ff: "purple",
	0x808000ff: "olive",
	0x808080ff: "gray",
	0xa0522dff: "sienna",
	0xa52a2aff: "brown",
	0xc0c0c0ff: "silver",
	0xcd853fff: "peru",
	0xd2b48cff: "tan",
	0xda70d6ff: "orchid",
	0xdda0ddff: "plum",
	0xee82eeff: "violet",
	0xf0e68cff: "khaki",
	0xf0ffffff: "azure",
	0xf5deb3ff: "wheat",
	0xf5f5dcff: "beige",
	0xfa8072ff: "salmon",
	0xfaf0e6ff: "linen",
	0xff0000ff: "red",
	0xff6347ff: "tomato",
	0xff7f50ff: "coral",
	0xffa500ff: "orange",
	0xffc0cbff: "pink",
	0xffd700ff: "gold",
	0xffe4c4ff: "bisque",
	0xfffafaff: "snow",
	0xfffff0ff: "ivory",
}
// colorNameToHex maps CSS named colors (lowercase) to their 0xRRGGBBAA
// values. Used by parseColor to recognize identifier color tokens.
var colorNameToHex = map[string]uint32{
	"black":                0x000000ff,
	"silver":               0xc0c0c0ff,
	"gray":                 0x808080ff,
	"white":                0xffffffff,
	"maroon":               0x800000ff,
	"red":                  0xff0000ff,
	"purple":               0x800080ff,
	"fuchsia":              0xff00ffff,
	"green":                0x008000ff,
	"lime":                 0x00ff00ff,
	"olive":                0x808000ff,
	"yellow":               0xffff00ff,
	"navy":                 0x000080ff,
	"blue":                 0x0000ffff,
	"teal":                 0x008080ff,
	"aqua":                 0x00ffffff,
	"orange":               0xffa500ff,
	"aliceblue":            0xf0f8ffff,
	"antiquewhite":         0xfaebd7ff,
	"aquamarine":           0x7fffd4ff,
	"azure":                0xf0ffffff,
	"beige":                0xf5f5dcff,
	"bisque":               0xffe4c4ff,
	"blanchedalmond":       0xffebcdff,
	"blueviolet":           0x8a2be2ff,
	"brown":                0xa52a2aff,
	"burlywood":            0xdeb887ff,
	"cadetblue":            0x5f9ea0ff,
	"chartreuse":           0x7fff00ff,
	"chocolate":            0xd2691eff,
	"coral":                0xff7f50ff,
	"cornflowerblue":       0x6495edff,
	"cornsilk":             0xfff8dcff,
	"crimson":              0xdc143cff,
	"cyan":                 0x00ffffff,
	"darkblue":             0x00008bff,
	"darkcyan":             0x008b8bff,
	"darkgoldenrod":        0xb8860bff,
	"darkgray":             0xa9a9a9ff,
	"darkgreen":            0x006400ff,
	"darkgrey":             0xa9a9a9ff,
	"darkkhaki":            0xbdb76bff,
	"darkmagenta":          0x8b008bff,
	"darkolivegreen":       0x556b2fff,
	"darkorange":           0xff8c00ff,
	"darkorchid":           0x9932ccff,
	"darkred":              0x8b0000ff,
	"darksalmon":           0xe9967aff,
	"darkseagreen":         0x8fbc8fff,
	"darkslateblue":        0x483d8bff,
	"darkslategray":        0x2f4f4fff,
	"darkslategrey":        0x2f4f4fff,
	"darkturquoise":        0x00ced1ff,
	"darkviolet":           0x9400d3ff,
	"deeppink":             0xff1493ff,
	"deepskyblue":          0x00bfffff,
	"dimgray":              0x696969ff,
	"dimgrey":              0x696969ff,
	"dodgerblue":           0x1e90ffff,
	"firebrick":            0xb22222ff,
	"floralwhite":          0xfffaf0ff,
	"forestgreen":          0x228b22ff,
	"gainsboro":            0xdcdcdcff,
	"ghostwhite":           0xf8f8ffff,
	"gold":                 0xffd700ff,
	"goldenrod":            0xdaa520ff,
	"greenyellow":          0xadff2fff,
	"grey":                 0x808080ff,
	"honeydew":             0xf0fff0ff,
	"hotpink":              0xff69b4ff,
	"indianred":            0xcd5c5cff,
	"indigo":               0x4b0082ff,
	"ivory":                0xfffff0ff,
	"khaki":                0xf0e68cff,
	"lavender":             0xe6e6faff,
	"lavenderblush":        0xfff0f5ff,
	"lawngreen":            0x7cfc00ff,
	"lemonchiffon":         0xfffacdff,
	"lightblue":            0xadd8e6ff,
	"lightcoral":           0xf08080ff,
	"lightcyan":            0xe0ffffff,
	"lightgoldenrodyellow": 0xfafad2ff,
	"lightgray":            0xd3d3d3ff,
	"lightgreen":           0x90ee90ff,
	"lightgrey":            0xd3d3d3ff,
	"lightpink":            0xffb6c1ff,
	"lightsalmon":          0xffa07aff,
	"lightseagreen":        0x20b2aaff,
	"lightskyblue":         0x87cefaff,
	"lightslategray":       0x778899ff,
	"lightslategrey":       0x778899ff,
	"lightsteelblue":       0xb0c4deff,
	"lightyellow":          0xffffe0ff,
	"limegreen":            0x32cd32ff,
	"linen":                0xfaf0e6ff,
	"magenta":              0xff00ffff,
	"mediumaquamarine":     0x66cdaaff,
	"mediumblue":           0x0000cdff,
	"mediumorchid":         0xba55d3ff,
	"mediumpurple":         0x9370dbff,
	"mediumseagreen":       0x3cb371ff,
	"mediumslateblue":      0x7b68eeff,
	"mediumspringgreen":    0x00fa9aff,
	"mediumturquoise":      0x48d1ccff,
	"mediumvioletred":      0xc71585ff,
	"midnightblue":         0x191970ff,
	"mintcream":            0xf5fffaff,
	"mistyrose":            0xffe4e1ff,
	"moccasin":             0xffe4b5ff,
	"navajowhite":          0xffdeadff,
	"oldlace":              0xfdf5e6ff,
	"olivedrab":            0x6b8e23ff,
	"orangered":            0xff4500ff,
	"orchid":               0xda70d6ff,
	"palegoldenrod":        0xeee8aaff,
	"palegreen":            0x98fb98ff,
	"paleturquoise":        0xafeeeeff,
	"palevioletred":        0xdb7093ff,
	"papayawhip":           0xffefd5ff,
	"peachpuff":            0xffdab9ff,
	"peru":                 0xcd853fff,
	"pink":                 0xffc0cbff,
	"plum":                 0xdda0ddff,
	"powderblue":           0xb0e0e6ff,
	"rosybrown":            0xbc8f8fff,
	"royalblue":            0x4169e1ff,
	"saddlebrown":          0x8b4513ff,
	"salmon":               0xfa8072ff,
	"sandybrown":           0xf4a460ff,
	"seagreen":             0x2e8b57ff,
	"seashell":             0xfff5eeff,
	"sienna":               0xa0522dff,
	"skyblue":              0x87ceebff,
	"slateblue":            0x6a5acdff,
	"slategray":            0x708090ff,
	"slategrey":            0x708090ff,
	"snow":                 0xfffafaff,
	"springgreen":          0x00ff7fff,
	"steelblue":            0x4682b4ff,
	"tan":                  0xd2b48cff,
	"thistle":              0xd8bfd8ff,
	"tomato":               0xff6347ff,
	"turquoise":            0x40e0d0ff,
	"violet":               0xee82eeff,
	"wheat":                0xf5deb3ff,
	"whitesmoke":           0xf5f5f5ff,
	"yellowgreen":          0x9acd32ff,
	"rebeccapurple":        0x663399ff,
}
// parseHex parses a string of hexadecimal digits (without a leading "#")
// into a 32-bit value. Both uppercase and lowercase digits are accepted.
// It returns false for an empty string or any non-hex character.
// (Previously an empty string was incorrectly reported as valid with a
// value of 0; all callers in this file guard with a length check, so
// rejecting it is backward compatible.)
func parseHex(text string) (uint32, bool) {
	if text == "" {
		return 0, false
	}
	hex := uint32(0)
	for _, c := range text {
		hex <<= 4
		switch {
		case c >= '0' && c <= '9':
			hex |= uint32(c) - '0'
		case c >= 'a' && c <= 'f':
			hex |= uint32(c) - ('a' - 10)
		case c >= 'A' && c <= 'F':
			hex |= uint32(c) - ('A' - 10)
		default:
			return 0, false
		}
	}
	return hex, true
}
// compactHex converts an 8-digit color value (0xAABBCCDD) into the
// equivalent 4-digit value (0xABCD). It is only meaningful when each
// channel's two nibbles are equal, i.e. when the value round-trips
// through expandHex.
func compactHex(v uint32) uint32 {
	return ((v >> 12) & 0xFF00) | ((v >> 4) & 0x00FF)
}
// expandHex converts a 4-digit color value (0xABCD) into the equivalent
// 8-digit value (0xAABBCCDD) by duplicating each nibble.
func expandHex(v uint32) uint32 {
	a := (v >> 12) & 0xF
	b := (v >> 8) & 0xF
	c := (v >> 4) & 0xF
	d := v & 0xF
	return a<<28 | a<<24 | b<<20 | b<<16 | c<<12 | c<<8 | d<<4 | d
}
// Accessors for the four channels of a 0xRRGGBBAA color value.
func hexR(v uint32) int { return int((v >> 24) & 255) }
func hexG(v uint32) int { return int((v >> 16) & 255) }
func hexB(v uint32) int { return int((v >> 8) & 255) }
func hexA(v uint32) int { return int((v >> 0) & 255) }
// floatToStringForColor formats a float with at most three decimal
// places, then drops trailing zeros and any trailing decimal point so
// the output is as compact as possible (0.5 => "0.5", 1.0 => "1").
func floatToStringForColor(a float64) string {
	text := strings.TrimRight(fmt.Sprintf("%.03f", a), "0")
	return strings.TrimSuffix(text, ".")
}
// degreesForAngle converts a CSS angle token into degrees. A plain
// number is interpreted as degrees; a dimension must use one of the
// units "deg", "grad", "rad", or "turn". The second return value is
// false when the token isn't a valid angle.
func degreesForAngle(token css_ast.Token) (float64, bool) {
	switch token.Kind {
	case css_lexer.TNumber:
		if value, err := strconv.ParseFloat(token.Text, 64); err == nil {
			return value, true
		}

	case css_lexer.TDimension:
		if value, err := strconv.ParseFloat(token.DimensionValue(), 64); err == nil {
			// Convert each supported unit to degrees via a scale factor
			var scale float64
			switch token.DimensionUnit() {
			case "deg":
				scale = 1
			case "grad":
				scale = 360.0 / 400.0
			case "rad":
				scale = 180.0 / math.Pi
			case "turn":
				scale = 360.0
			}
			if scale != 0 {
				return value * scale, true
			}
		}
	}
	return 0, false
}
// lowerAlphaPercentageToNumber rewrites a percentage alpha token into
// the equivalent number form ("50%" => "0.5") for the older color
// syntax. Any non-percentage token passes through unchanged.
func lowerAlphaPercentageToNumber(token css_ast.Token) css_ast.Token {
	if token.Kind != css_lexer.TPercentage {
		return token
	}
	percent := token.Text[:len(token.Text)-1] // strip the "%" sign
	if value, err := strconv.ParseFloat(percent, 64); err == nil {
		token.Kind = css_lexer.TNumber
		token.Text = floatToStringForColor(value / 100.0)
	}
	return token
}
// Convert newer color syntax to older color syntax for older browsers.
// This rewrites hex colors with alpha ("#rgba"/"#rrggbbaa"), the
// "rebeccapurple" keyword, and the modern space-separated rgb()/hsl()
// forms into equivalents that the configured target browsers support.
// Tokens that don't need lowering are returned unchanged.
func (p *parser) lowerColor(token css_ast.Token) css_ast.Token {
	text := token.Text

	switch token.Kind {
	case css_lexer.THash:
		if p.options.UnsupportedCSSFeatures.Has(compat.HexRGBA) {
			switch len(text) {
			case 4:
				// "#1234" => "rgba(17, 34, 51, 0.267)"
				if hex, ok := parseHex(text); ok {
					// Expand each nibble before extracting the channels
					hex = expandHex(hex)
					token.Kind = css_lexer.TFunction
					token.Text = "rgba"
					commaToken := p.commaToken()
					token.Children = &[]css_ast.Token{
						{Kind: css_lexer.TNumber, Text: strconv.Itoa(hexR(hex))}, commaToken,
						{Kind: css_lexer.TNumber, Text: strconv.Itoa(hexG(hex))}, commaToken,
						{Kind: css_lexer.TNumber, Text: strconv.Itoa(hexB(hex))}, commaToken,
						{Kind: css_lexer.TNumber, Text: floatToStringForColor(float64(hexA(hex)) / 255)},
					}
				}

			case 8:
				// "#12345678" => "rgba(18, 52, 86, 0.471)"
				if hex, ok := parseHex(text); ok {
					token.Kind = css_lexer.TFunction
					token.Text = "rgba"
					commaToken := p.commaToken()
					token.Children = &[]css_ast.Token{
						{Kind: css_lexer.TNumber, Text: strconv.Itoa(hexR(hex))}, commaToken,
						{Kind: css_lexer.TNumber, Text: strconv.Itoa(hexG(hex))}, commaToken,
						{Kind: css_lexer.TNumber, Text: strconv.Itoa(hexB(hex))}, commaToken,
						{Kind: css_lexer.TNumber, Text: floatToStringForColor(float64(hexA(hex)) / 255)},
					}
				}
			}
		}

	case css_lexer.TIdent:
		if text == "rebeccapurple" && p.options.UnsupportedCSSFeatures.Has(compat.RebeccaPurple) {
			token.Kind = css_lexer.THash
			token.Text = "663399"
		}

	case css_lexer.TFunction:
		switch text {
		case "rgb", "rgba", "hsl", "hsla":
			if p.options.UnsupportedCSSFeatures.Has(compat.Modern_RGB_HSL) {
				args := *token.Children
				removeAlpha := false
				addAlpha := false

				// "hsl(1deg, 2%, 3%)" => "hsl(1, 2%, 3%)"
				if (text == "hsl" || text == "hsla") && len(args) > 0 {
					if degrees, ok := degreesForAngle(args[0]); ok {
						args[0].Kind = css_lexer.TNumber
						args[0].Text = floatToStringForColor(degrees)
					}
				}

				// These check for "IsNumeric" to reject "var()" since a single "var()"
				// can substitute for multiple tokens and that messes up pattern matching
				switch len(args) {
				case 3:
					// "rgba(1 2 3)" => "rgb(1, 2, 3)"
					// "hsla(1 2% 3%)" => "hsl(1, 2%, 3%)"
					if args[0].Kind.IsNumeric() && args[1].Kind.IsNumeric() && args[2].Kind.IsNumeric() {
						removeAlpha = true
						args[0].Whitespace = 0
						args[1].Whitespace = 0
						commaToken := p.commaToken()
						token.Children = &[]css_ast.Token{
							args[0], commaToken,
							args[1], commaToken,
							args[2],
						}
					}

				case 5:
					// "rgba(1, 2, 3)" => "rgb(1, 2, 3)"
					// "hsla(1, 2%, 3%)" => "hsl(1, 2%, 3%)"
					if args[0].Kind.IsNumeric() && args[1].Kind == css_lexer.TComma &&
						args[2].Kind.IsNumeric() && args[3].Kind == css_lexer.TComma &&
						args[4].Kind.IsNumeric() {
						removeAlpha = true
						break
					}

					// "rgb(1 2 3 / 4%)" => "rgba(1, 2, 3, 0.04)"
					// "hsl(1 2% 3% / 4%)" => "hsla(1, 2%, 3%, 0.04)"
					if args[0].Kind.IsNumeric() && args[1].Kind.IsNumeric() && args[2].Kind.IsNumeric() &&
						args[3].Kind == css_lexer.TDelimSlash && args[4].Kind.IsNumeric() {
						addAlpha = true
						args[0].Whitespace = 0
						args[1].Whitespace = 0
						args[2].Whitespace = 0
						commaToken := p.commaToken()
						token.Children = &[]css_ast.Token{
							args[0], commaToken,
							args[1], commaToken,
							args[2], commaToken,
							lowerAlphaPercentageToNumber(args[4]),
						}
					}

				case 7:
					// "rgb(1%, 2%, 3%, 4%)" => "rgba(1%, 2%, 3%, 0.04)"
					// "hsl(1, 2%, 3%, 4%)" => "hsla(1, 2%, 3%, 0.04)"
					if args[0].Kind.IsNumeric() && args[1].Kind == css_lexer.TComma &&
						args[2].Kind.IsNumeric() && args[3].Kind == css_lexer.TComma &&
						args[4].Kind.IsNumeric() && args[5].Kind == css_lexer.TComma &&
						args[6].Kind.IsNumeric() {
						addAlpha = true
						args[6] = lowerAlphaPercentageToNumber(args[6])
					}
				}

				// Rename the function to match whether an alpha channel
				// is now present in the argument list
				if removeAlpha {
					if text == "rgba" {
						token.Text = "rgb"
					} else if text == "hsla" {
						token.Text = "hsl"
					}
				} else if addAlpha {
					if text == "rgb" {
						token.Text = "rgba"
					} else if text == "hsl" {
						token.Text = "hsla"
					}
				}
			}
		}
	}

	return token
}
// parseColor parses a color token (a named color, a hex color, or an
// "rgb()"/"hsl()" function) into a single 0xRRGGBBAA value. The second
// return value is false when the token isn't a color this function
// understands. Numeric components are parsed by parseColorByte and
// parseAlphaByte (defined elsewhere in this file — presumably they
// return a channel byte; confirm against their definitions).
func parseColor(token css_ast.Token) (uint32, bool) {
	text := token.Text

	switch token.Kind {
	case css_lexer.TIdent:
		if hex, ok := colorNameToHex[strings.ToLower(text)]; ok {
			return hex, true
		}

	case css_lexer.THash:
		switch len(text) {
		case 3:
			// "#123"
			if hex, ok := parseHex(text); ok {
				return (expandHex(hex) << 8) | 0xFF, true
			}

		case 4:
			// "#1234"
			if hex, ok := parseHex(text); ok {
				return expandHex(hex), true
			}

		case 6:
			// "#112233"
			if hex, ok := parseHex(text); ok {
				return (hex << 8) | 0xFF, true
			}

		case 8:
			// "#11223344"
			if hex, ok := parseHex(text); ok {
				return hex, true
			}
		}

	case css_lexer.TFunction:
		switch text {
		case "rgb", "rgba":
			args := *token.Children
			var r, g, b, a css_ast.Token

			switch len(args) {
			case 3:
				// "rgb(1 2 3)"
				r, g, b = args[0], args[1], args[2]

			case 5:
				// "rgba(1, 2, 3)"
				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma {
					r, g, b = args[0], args[2], args[4]
					break
				}

				// "rgb(1 2 3 / 4%)"
				if args[3].Kind == css_lexer.TDelimSlash {
					r, g, b, a = args[0], args[1], args[2], args[4]
				}

			case 7:
				// "rgb(1%, 2%, 3%, 4%)"
				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma && args[5].Kind == css_lexer.TComma {
					r, g, b, a = args[0], args[2], args[4], args[6]
				}
			}

			// An unmatched pattern leaves zero-value tokens here, which
			// the parse helpers below reject
			if r, ok := parseColorByte(r, 1); ok {
				if g, ok := parseColorByte(g, 1); ok {
					if b, ok := parseColorByte(b, 1); ok {
						if a, ok := parseAlphaByte(a); ok {
							return uint32((r << 24) | (g << 16) | (b << 8) | a), true
						}
					}
				}
			}

		case "hsl", "hsla":
			args := *token.Children
			var h, s, l, a css_ast.Token

			switch len(args) {
			case 3:
				// "hsl(1 2 3)"
				h, s, l = args[0], args[1], args[2]

			case 5:
				// "hsla(1, 2, 3)"
				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma {
					h, s, l = args[0], args[2], args[4]
					break
				}

				// "hsl(1 2 3 / 4%)"
				if args[3].Kind == css_lexer.TDelimSlash {
					h, s, l, a = args[0], args[1], args[2], args[4]
				}

			case 7:
				// "hsl(1%, 2%, 3%, 4%)"
				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma && args[5].Kind == css_lexer.TComma {
					h, s, l, a = args[0], args[2], args[4], args[6]
				}
			}

			// Convert from HSL to RGB. The algorithm is from the section
			// "Converting HSL colors to sRGB colors" in the specification.
			if h, ok := degreesForAngle(h); ok {
				if s, ok := s.FractionForPercentage(); ok {
					if l, ok := l.FractionForPercentage(); ok {
						if a, ok := parseAlphaByte(a); ok {
							h /= 360.0
							var t2 float64
							if l <= 0.5 {
								t2 = l * (s + 1)
							} else {
								t2 = l + s - (l * s)
							}
							t1 := l*2 - t2
							r := hueToRgb(t1, t2, h+1.0/3.0)
							g := hueToRgb(t1, t2, h)
							b := hueToRgb(t1, t2, h-1.0/3.0)
							return uint32((r << 24) | (g << 16) | (b << 8) | a), true
						}
					}
				}
			}
		}
	}

	return 0, false
}
// hueToRgb converts one hue sector into a single RGB channel value in the
// range [0, 255], following the piecewise formula from the CSS specification's
// "Converting HSL colors to sRGB colors" algorithm. "t1" and "t2" are the
// precomputed lightness/saturation intermediates and "hue" is a fraction of a
// full turn (values outside [0, 1) are wrapped).
func hueToRgb(t1 float64, t2 float64, hue float64) uint32 {
	// Wrap the hue into [0, 1) and rescale it to [0, 6)
	hue = (hue - math.Floor(hue)) * 6.0

	var channel float64
	switch {
	case hue < 1:
		channel = (t2-t1)*hue + t1
	case hue < 3:
		channel = t2
	case hue < 4:
		channel = (t2-t1)*(4-hue) + t1
	default:
		channel = t1
	}

	// Scale to a byte and clamp to [0, 255]
	value := int(math.Round(channel * 255))
	if value < 0 {
		value = 0
	} else if value > 255 {
		value = 255
	}
	return uint32(value)
}
// parseAlphaByte parses an alpha channel token into a byte value. A missing
// alpha token (the token zero value, kind T(0)) means the color is fully
// opaque.
func parseAlphaByte(token css_ast.Token) (uint32, bool) {
	if token.Kind != css_lexer.T(0) {
		return parseColorByte(token, 255)
	}
	return 255, true
}
// parseColorByte parses a single color channel token into a byte value,
// clamped to [0, 255]. Plain numbers are multiplied by "scale"; percentages
// are always mapped so that 100% becomes 255. Any other token kind (or an
// unparsable number) returns false.
func parseColorByte(token css_ast.Token, scale float64) (uint32, bool) {
	value, valid := 0, false

	switch token.Kind {
	case css_lexer.TNumber:
		if parsed, err := strconv.ParseFloat(token.Text, 64); err == nil {
			value = int(math.Round(parsed * scale))
			valid = true
		}

	case css_lexer.TPercentage:
		if parsed, err := strconv.ParseFloat(token.PercentageValue(), 64); err == nil {
			value = int(math.Round(parsed * (255.0 / 100.0)))
			valid = true
		}
	}

	// Clamp to a single byte
	if value < 0 {
		value = 0
	} else if value > 255 {
		value = 255
	}
	return uint32(value), valid
}
// mangleColor rewrites a color token into its shortest equivalent form given
// the already-parsed 0xRRGGBBAA value in "hex". Fully-opaque colors become a
// short color name or a 3/6-digit hex; translucent colors become a 4/8-digit
// hex when the target supports hex colors with alpha, and an "rgba()" call
// otherwise.
func (p *parser) mangleColor(token css_ast.Token, hex uint32) css_ast.Token {
	// Note: Do NOT remove color information from fully transparent colors.
	// Safari behaves differently than other browsers for color interpolation:
	// https://css-tricks.com/thing-know-gradients-transparent-black/

	if hexA(hex) == 255 {
		// Fully opaque: the alpha channel can be dropped entirely
		token.Children = nil
		if name, ok := shortColorName[hex]; ok {
			// A color name that's at least as short as the hex form
			token.Kind = css_lexer.TIdent
			token.Text = name
		} else {
			token.Kind = css_lexer.THash
			hex >>= 8 // Drop the alpha byte (known to be 0xFF here)
			compact := compactHex(hex)
			if hex == expandHex(compact) {
				// "#aabbcc" => "#abc"
				token.Text = fmt.Sprintf("%03x", compact)
			} else {
				token.Text = fmt.Sprintf("%06x", hex)
			}
		}
	} else if !p.options.UnsupportedCSSFeatures.Has(compat.HexRGBA) {
		// The target supports hex colors with an alpha digit
		token.Children = nil
		token.Kind = css_lexer.THash
		compact := compactHex(hex)
		if hex == expandHex(compact) {
			// "#aabbccdd" => "#abcd"
			token.Text = fmt.Sprintf("%04x", compact)
		} else {
			token.Text = fmt.Sprintf("%08x", hex)
		}
	} else {
		// Fall back to "rgba(r, g, b, a)" for older targets. Each entry in
		// the alpha fraction table is exactly four characters wide and padded
		// with spaces, so trim the padding after slicing.
		token.Kind = css_lexer.TFunction
		token.Text = "rgba"
		commaToken := p.commaToken()
		index := hexA(hex) * 4
		alpha := alphaFractionTable[index : index+4]
		if space := strings.IndexByte(alpha, ' '); space != -1 {
			alpha = alpha[:space]
		}
		token.Children = &[]css_ast.Token{
			{Kind: css_lexer.TNumber, Text: strconv.Itoa(hexR(hex))}, commaToken,
			{Kind: css_lexer.TNumber, Text: strconv.Itoa(hexG(hex))}, commaToken,
			{Kind: css_lexer.TNumber, Text: strconv.Itoa(hexB(hex))}, commaToken,
			{Kind: css_lexer.TNumber, Text: alpha},
		}
	}
	return token
}
// Every four characters in this table is the fraction for that alpha byte
// index: entry i (0-255) is the shortest decimal string f such that
// round(f * 255) == i, left-justified and padded with trailing spaces to
// EXACTLY four characters. mangleColor slices this table with
// [index : index+4] and trims the padding, so the fixed entry width is a
// hard requirement (the padding had been collapsed, which would misalign
// every entry after the first short one).
const alphaFractionTable string = "" +
	"0   .004.008.01 .016.02 .024.027.03 .035.04 .043.047.05 .055.06 " +
	".063.067.07 .075.08 .082.086.09 .094.098.1  .106.11 .114.118.12 " +
	".125.13 .133.137.14 .145.15 .153.157.16 .165.17 .173.176.18 .184" +
	".19 .192.196.2  .204.208.21 .216.22 .224.227.23 .235.24 .243.247" +
	".25 .255.26 .263.267.27 .275.28 .282.286.29 .294.298.3  .306.31 " +
	".314.318.32 .325.33 .333.337.34 .345.35 .353.357.36 .365.37 .373" +
	".376.38 .384.39 .392.396.4  .404.408.41 .416.42 .424.427.43 .435" +
	".44 .443.447.45 .455.46 .463.467.47 .475.48 .482.486.49 .494.498" +
	".5  .506.51 .514.518.52 .525.53 .533.537.54 .545.55 .553.557.56 " +
	".565.57 .573.576.58 .584.59 .592.596.6  .604.608.61 .616.62 .624" +
	".627.63 .635.64 .643.647.65 .655.66 .663.667.67 .675.68 .682.686" +
	".69 .694.698.7  .706.71 .714.718.72 .725.73 .733.737.74 .745.75 " +
	".753.757.76 .765.77 .773.776.78 .784.79 .792.796.8  .804.808.81 " +
	".816.82 .824.827.83 .835.84 .843.847.85 .855.86 .863.867.87 .875" +
	".88 .882.886.89 .894.898.9  .906.91 .914.918.92 .925.93 .933.937" +
	".94 .945.95 .953.957.96 .965.97 .973.976.98 .984.99 .992.9961   "

View File

@ -0,0 +1,135 @@
package css_parser
import (
"strconv"
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// Specification: https://drafts.csswg.org/css-fonts/#font-prop
// [ <font-style> || <font-variant-css2> || <font-weight> || <font-stretch-css3> ]? <font-size> [ / <line-height> ]? <font-family>
//
// mangleFont rewrites the value of a "font" shorthand property into a
// shorter equivalent form. It is strictly best-effort: the original token
// slice is returned unchanged whenever anything unrecognized is encountered.
func (p *parser) mangleFont(tokens []css_ast.Token) []css_ast.Token {
	var result []css_ast.Token

	// Scan up to the font size
	pos := 0
	for ; pos < len(tokens); pos++ {
		token := tokens[pos]
		if isFontSize(token) {
			break
		}

		switch token.Kind {
		case css_lexer.TIdent:
			switch strings.ToLower(token.Text) {
			case "normal":
				// "All subproperties of the font property are first reset to their initial values"
				// This implies that "normal" doesn't do anything. Also all of the optional values
				// contain "normal" as an option and they are unordered so it's impossible to say
				// what property "normal" corresponds to. Just drop these tokens to save space.
				continue

			// <font-style>
			case "italic":
				// Deliberately empty: falls out of the switch to the append below

			case "oblique":
				// "oblique" may carry a following angle which must stay with it
				if pos+1 < len(tokens) && tokens[pos+1].IsAngle() {
					result = append(result, token, tokens[pos+1])
					pos++
					continue
				}

			// <font-variant-css2>
			case "small-caps":
				// Deliberately empty: falls out of the switch to the append below

			// <font-weight>
			case "bold", "bolder", "lighter":
				result = append(result, p.mangleFontWeight(token))
				continue

			// <font-stretch-css3>
			case "ultra-condensed", "extra-condensed", "condensed", "semi-condensed",
				"semi-expanded", "expanded", "extra-expanded", "ultra-expanded":
				// Deliberately empty: falls out of the switch to the append below

			default:
				// All other tokens are unrecognized, so we bail if we hit one
				return tokens
			}
			result = append(result, token)

		case css_lexer.TNumber:
			// A bare number in this position is a numeric <font-weight>.
			// "Only values greater than or equal to 1, and less than or equal to
			// 1000, are valid, and all other values are invalid."
			if value, err := strconv.ParseFloat(token.Text, 64); err != nil || value < 1 || value > 1000 {
				return tokens
			}
			result = append(result, token)

		default:
			// All other tokens are unrecognized, so we bail if we hit one
			return tokens
		}
	}

	// <font-size>
	if pos == len(tokens) {
		return tokens
	}
	result = append(result, tokens[pos])
	pos++

	// / <line-height>
	if pos < len(tokens) && tokens[pos].Kind == css_lexer.TDelimSlash {
		if pos+1 == len(tokens) {
			// A trailing "/" with nothing after it is invalid
			return tokens
		}
		result = append(result, tokens[pos], tokens[pos+1])
		pos += 2

		// Remove the whitespace around the "/" character
		if p.options.RemoveWhitespace {
			result[len(result)-3].Whitespace &= ^css_ast.WhitespaceAfter
			result[len(result)-2].Whitespace = 0
			result[len(result)-1].Whitespace &= ^css_ast.WhitespaceBefore
		}
	}

	// <font-family>
	if family, ok := p.mangleFontFamily(tokens[pos:]); ok {
		return append(result, family...)
	}
	return tokens
}
// fontSizeKeywords is the set of keyword values that identify the
// <font-size> part of the "font" shorthand (see isFontSize). Lookups are
// done on lowercased text since CSS keywords are case-insensitive.
var fontSizeKeywords = map[string]bool{
	// <absolute-size>: https://drafts.csswg.org/css-fonts/#valdef-font-size-absolute-size
	"xx-small":  true,
	"x-small":   true,
	"small":     true,
	"medium":    true,
	"large":     true,
	"x-large":   true,
	"xx-large":  true,
	"xxx-large": true,

	// <relative-size>: https://drafts.csswg.org/css-fonts/#valdef-font-size-relative-size
	"larger":  true,
	"smaller": true,
}
// Specification: https://drafts.csswg.org/css-fonts/#font-size-prop
//
// isFontSize reports whether the token can be the <font-size> part of the
// "font" shorthand: a <length-percentage> or one of the size keywords.
func isFontSize(token css_ast.Token) bool {
	switch token.Kind {
	case css_lexer.TDimension, css_lexer.TPercentage:
		// <length-percentage>
		return true

	case css_lexer.TIdent:
		// <absolute-size> or <relative-size> (keywords are case-insensitive)
		return fontSizeKeywords[strings.ToLower(token.Text)]
	}
	return false
}

View File

@ -0,0 +1,142 @@
package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// Specification: https://drafts.csswg.org/css-values-4/#common-keywords
//
// wideKeywords is the set of CSS-wide keywords, which are valid for every
// property and therefore can never be used as a <custom-ident>.
var wideKeywords = map[string]bool{
	"initial": true,
	"inherit": true,
	"unset":   true,
}

// Specification: https://drafts.csswg.org/css-fonts/#generic-font-families
//
// genericFamilyNames is the set of generic font family keywords. These are
// kept as-is by the font-family mangler and must never be produced by
// unquoting a <family-name> string.
var genericFamilyNames = map[string]bool{
	"serif":      true,
	"sans-serif": true,
	"cursive":    true,
	"fantasy":    true,
	"monospace":  true,
	"system-ui":  true,
	"emoji":      true,
	"math":       true,
	"fangsong":   true,

	"ui-serif":      true,
	"ui-sans-serif": true,
	"ui-monospace":  true,
	"ui-rounded":    true,
}
// Specification: https://drafts.csswg.org/css-fonts/#font-family-prop
//
// mangleFontFamily rewrites a comma-separated "font-family" value into a
// shorter equivalent form. It returns false when any family in the list is
// unrecognized, in which case the caller should keep the original tokens.
func (p *parser) mangleFontFamily(tokens []css_ast.Token) ([]css_ast.Token, bool) {
	// Consume the first family, then one more family per separating comma
	result, rest, ok := p.mangleFamilyNameOrGenericName(nil, tokens)
	for ok && len(rest) > 0 {
		if rest[0].Kind != css_lexer.TComma {
			// Leftover tokens that aren't a comma mean the value is malformed
			return nil, false
		}
		result, rest, ok = p.mangleFamilyNameOrGenericName(append(result, rest[0]), rest[1:])
	}
	if !ok {
		return nil, false
	}
	return result, true
}
// mangleFamilyNameOrGenericName consumes one font family from the front of
// "tokens" (either a <generic-family> keyword, a quoted <family-name>
// string, or a run of unquoted identifiers), appends its possibly-shortened
// form to "result", and returns the updated result, the remaining tokens,
// and whether the family was recognized. The main optimization is unquoting
// string-valued family names into identifier sequences when that is safe.
func (p *parser) mangleFamilyNameOrGenericName(result []css_ast.Token, tokens []css_ast.Token) ([]css_ast.Token, []css_ast.Token, bool) {
	if len(tokens) > 0 {
		t := tokens[0]

		// Handle <generic-family>
		// NOTE(review): this lookup is case-sensitive while CSS keywords are
		// case-insensitive. An upper-cased generic family falls through to the
		// <custom-ident> branch below, which bails (isValidCustomIdent rejects
		// it after lowercasing), so output stays correct but unmangled —
		// confirm whether that is intentional.
		if t.Kind == css_lexer.TIdent && genericFamilyNames[t.Text] {
			return append(result, t), tokens[1:], true
		}

		// Handle <family-name>
		if t.Kind == css_lexer.TString {
			// "If a sequence of identifiers is given as a <family-name>, the computed
			// value is the name converted to a string by joining all the identifiers
			// in the sequence by single spaces."
			//
			// More information: https://mathiasbynens.be/notes/unquoted-font-family
			names := strings.Split(t.Text, " ")
			for _, name := range names {
				if !isValidCustomIdent(name, genericFamilyNames) {
					// Unquoting would change the meaning; keep the string as-is
					return append(result, t), tokens[1:], true
				}
			}
			for i, name := range names {
				var whitespace css_ast.WhitespaceFlags
				if i != 0 || !p.options.RemoveWhitespace {
					whitespace = css_ast.WhitespaceBefore
				}
				result = append(result, css_ast.Token{
					Kind:       css_lexer.TIdent,
					Text:       name,
					Whitespace: whitespace,
				})
			}
			return result, tokens[1:], true
		}

		// "Font family names other than generic families must either be given
		// quoted as <string>s, or unquoted as a sequence of one or more
		// <custom-ident>."
		if t.Kind == css_lexer.TIdent {
			for {
				if !isValidCustomIdent(t.Text, genericFamilyNames) {
					return nil, nil, false
				}
				result = append(result, t)
				tokens = tokens[1:]
				if len(tokens) == 0 || tokens[0].Kind != css_lexer.TIdent {
					break
				}
				t = tokens[0]
			}
			return result, tokens, true
		}
	}

	// Anything other than the cases listed above causes us to bail
	return nil, nil, false
}
// Specification: https://drafts.csswg.org/css-values-4/#custom-idents
//
// isValidCustomIdent reports whether "text" can safely be used as an
// unquoted <custom-ident>: it must be non-empty, must not collide with a
// CSS-wide keyword, "default", or any of the caller's predefined keywords
// (all compared case-insensitively), and must serialize as an identifier
// without requiring escape sequences.
func isValidCustomIdent(text string, predefinedKeywords map[string]bool) bool {
	lowered := strings.ToLower(text)

	// Excluded names: property-specific keywords, CSS-wide keywords,
	// "default", and the empty string
	if lowered == "" || lowered == "default" || predefinedKeywords[lowered] || wideKeywords[lowered] {
		return false
	}

	// Reject anything that would need escaping to serialize as an identifier
	if !css_lexer.WouldStartIdentifierWithoutEscapes(text) {
		return false
	}
	for _, c := range text {
		if !css_lexer.IsNameContinue(c) {
			return false
		}
	}
	return true
}

View File

@ -0,0 +1,25 @@
package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// mangleFontWeight rewrites keyword font weights into their shorter numeric
// equivalents ("normal" => "400", "bold" => "700"). Other tokens (including
// "bolder"/"lighter", which have no numeric equivalent) pass through
// unchanged.
func (p *parser) mangleFontWeight(token css_ast.Token) css_ast.Token {
	if token.Kind == css_lexer.TIdent {
		switch strings.ToLower(token.Text) {
		case "normal":
			token.Kind = css_lexer.TNumber
			token.Text = "400"
		case "bold":
			token.Kind = css_lexer.TNumber
			token.Text = "700"
		}
	}
	return token
}

View File

@ -0,0 +1,391 @@
package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// turnPercentIntoNumberIfShorter rewrites a percentage token in place into
// its equivalent plain number when that form is strictly shorter (e.g.
// "50%" => ".5"). Non-percentage tokens are left untouched.
func turnPercentIntoNumberIfShorter(t *css_ast.Token) {
	if t.Kind != css_lexer.TPercentage {
		return
	}
	// Dividing by 100 is a two-place decimal point shift
	if shifted, ok := shiftDot(t.PercentageValue(), -2); ok && len(shifted) < len(t.Text) {
		t.Kind = css_lexer.TNumber
		t.Text = shifted
	}
}
// https://www.w3.org/TR/css-transforms-1/#two-d-transform-functions
// https://drafts.csswg.org/css-transforms-2/#transform-functions
//
// mangleTransforms rewrites the function calls in a "transform" property
// value into shorter equivalent forms (e.g. "translateX(x)" => "translate(x)",
// "matrix(a, 0, 0, a, 0, 0)" => "scale(a)"). Tokens are modified in place and
// the same slice is returned. Only comma-separated argument lists are
// rewritten; anything else is left untouched.
func (p *parser) mangleTransforms(tokens []css_ast.Token) []css_ast.Token {
	for i := range tokens {
		if token := &tokens[i]; token.Kind == css_lexer.TFunction {
			if args := *token.Children; css_ast.TokensAreCommaSeparated(args) {
				// "n" counts the comma separators too: 1 argument => n == 1,
				// 2 => 3, 3 => 5, 4 => 7, 6 => 11, and 16 => 31
				n := len(args)

				switch strings.ToLower(token.Text) {
				////////////////////////////////////////////////////////////////////////////////
				// 2D transforms

				case "matrix":
					// specifies a 2D transformation in the form of a transformation
					// matrix of the six values a, b, c, d, e, f.
					if n == 11 {
						// | a c 0 e |
						// | b d 0 f |
						// | 0 0 1 0 |
						// | 0 0 0 1 |
						a, b, c, d, e, f := args[0], args[2], args[4], args[6], args[8], args[10]
						if b.IsZero() && c.IsZero() && e.IsZero() && f.IsZero() {
							// | a 0 0 0 |
							// | 0 d 0 0 |
							// | 0 0 1 0 |
							// | 0 0 0 1 |
							if a.EqualIgnoringWhitespace(d) {
								// "matrix(a, 0, 0, a, 0, 0)" => "scale(a)"
								token.Text = "scale"
								*token.Children = args[:1]
							} else if d.IsOne() {
								// "matrix(a, 0, 0, 1, 0, 0)" => "scaleX(a)"
								token.Text = "scaleX"
								*token.Children = args[:1]
							} else if a.IsOne() {
								// "matrix(1, 0, 0, d, 0, 0)" => "scaleY(d)"
								token.Text = "scaleY"
								*token.Children = args[6:7]
							} else {
								// "matrix(a, 0, 0, d, 0, 0)" => "scale(a, d)"
								token.Text = "scale"
								*token.Children = append(args[:2], d)
							}

							// Note: A "matrix" cannot be directly converted into a "translate"
							// because "translate" requires units while "matrix" requires no
							// units. I'm not sure exactly what the semantics are so I'm not
							// sure if you can just add "px" or not. Even if that did work,
							// you still couldn't substitute values containing "var()" since
							// units would still not be substituted in that case.
						}
					}

				case "translate":
					// specifies a 2D translation by the vector [tx, ty], where tx is the
					// first translation-value parameter and ty is the optional second
					// translation-value parameter. If <ty> is not provided, ty has zero
					// as a value.
					if n == 1 {
						args[0].TurnLengthOrPercentageIntoNumberIfZero()
					} else if n == 3 {
						tx, ty := &args[0], &args[2]
						tx.TurnLengthOrPercentageIntoNumberIfZero()
						ty.TurnLengthOrPercentageIntoNumberIfZero()
						if ty.IsZero() {
							// "translate(tx, 0)" => "translate(tx)"
							*token.Children = args[:1]
						} else if tx.IsZero() {
							// "translate(0, ty)" => "translateY(ty)"
							token.Text = "translateY"
							*token.Children = args[2:]
						}
					}

				case "translatex":
					// specifies a translation by the given amount in the X direction.
					if n == 1 {
						// "translateX(tx)" => "translate(tx)"
						token.Text = "translate"
						args[0].TurnLengthOrPercentageIntoNumberIfZero()
					}

				case "translatey":
					// specifies a translation by the given amount in the Y direction.
					if n == 1 {
						args[0].TurnLengthOrPercentageIntoNumberIfZero()
					}

				case "scale":
					// specifies a 2D scale operation by the [sx,sy] scaling vector
					// described by the 2 parameters. If the second parameter is not
					// provided, it takes a value equal to the first. For example,
					// scale(1, 1) would leave an element unchanged, while scale(2, 2)
					// would cause it to appear twice as long in both the X and Y axes,
					// or four times its typical geometric size.
					if n == 1 {
						turnPercentIntoNumberIfShorter(&args[0])
					} else if n == 3 {
						sx, sy := &args[0], &args[2]
						turnPercentIntoNumberIfShorter(sx)
						turnPercentIntoNumberIfShorter(sy)
						if sx.EqualIgnoringWhitespace(*sy) {
							// "scale(s, s)" => "scale(s)"
							*token.Children = args[:1]
						} else if sy.IsOne() {
							// "scale(s, 1)" => "scaleX(s)"
							token.Text = "scaleX"
							*token.Children = args[:1]
						} else if sx.IsOne() {
							// "scale(1, s)" => "scaleY(s)"
							token.Text = "scaleY"
							*token.Children = args[2:]
						}
					}

				case "scalex":
					// specifies a 2D scale operation using the [sx,1] scaling vector,
					// where sx is given as the parameter.
					if n == 1 {
						turnPercentIntoNumberIfShorter(&args[0])
					}

				case "scaley":
					// specifies a 2D scale operation using the [1,sy] scaling vector,
					// where sy is given as the parameter.
					if n == 1 {
						turnPercentIntoNumberIfShorter(&args[0])
					}

				case "rotate":
					// specifies a 2D rotation by the angle specified in the parameter
					// about the origin of the element, as defined by the
					// transform-origin property. For example, rotate(90deg) would
					// cause elements to appear rotated one-quarter of a turn in the
					// clockwise direction.
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "skew":
					// specifies a 2D skew by [ax,ay] for X and Y. If the second
					// parameter is not provided, it has a zero value.
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					} else if n == 3 {
						ax, ay := &args[0], &args[2]
						ax.TurnLengthIntoNumberIfZero()
						ay.TurnLengthIntoNumberIfZero()
						if ay.IsZero() {
							// "skew(ax, 0)" => "skew(ax)"
							*token.Children = args[:1]
						}
					}

				case "skewx":
					// specifies a 2D skew transformation along the X axis by the given
					// angle.
					if n == 1 {
						// "skewX(ax)" => "skew(ax)"
						token.Text = "skew"
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "skewy":
					// specifies a 2D skew transformation along the Y axis by the given
					// angle.
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}

				////////////////////////////////////////////////////////////////////////////////
				// 3D transforms

				case "matrix3d":
					// specifies a 3D transformation as a 4x4 homogeneous matrix of 16
					// values in column-major order.
					if n == 31 {
						// | m0 m4 m8  m12 |
						// | m1 m5 m9  m13 |
						// | m2 m6 m10 m14 |
						// | m3 m7 m11 m15 |
						// Build a bitmask describing which elements are 0 (low 16 bits)
						// and which are 1 (high 16 bits), then compare it against the
						// shapes of a pure scale matrix and a 2D-only matrix
						mask := uint32(0)
						for i := 0; i < 16; i++ {
							if arg := args[i*2]; arg.IsZero() {
								mask |= 1 << i
							} else if arg.IsOne() {
								mask |= (1 << 16) << i
							}
						}
						const onlyScale = 0b1000_0000_0000_0000_0111_1011_1101_1110
						const only2D = 0b1000_0100_0000_0000_0100_1011_1100_1100
						if (mask & onlyScale) == onlyScale {
							// | m0 0  0   0 |
							// | 0  m5 0   0 |
							// | 0  0  m10 0 |
							// | 0  0  0   1 |
							sx, sy, sz := args[0], args[10], args[20]
							if sx.EqualIgnoringWhitespace(sy) && sz.IsOne() {
								token.Text = "scale"
								*token.Children = args[:1]
							} else if sy.IsOne() && sz.IsOne() {
								token.Text = "scaleX"
								*token.Children = args[:1]
							} else if sx.IsOne() && sz.IsOne() {
								token.Text = "scaleY"
								*token.Children = args[10:11]
							} else if sx.IsOne() && sy.IsOne() {
								token.Text = "scaleZ"
								*token.Children = args[20:21]
							} else if sz.IsOne() {
								token.Text = "scale"
								*token.Children = append(args[0:2], args[10])
							} else {
								token.Text = "scale3d"
								*token.Children = append(append(args[0:2], args[10:12]...), args[20])
							}
						} else if (mask & only2D) == only2D {
							// | m0 m4 0 m12 |
							// | m1 m5 0 m13 |
							// | 0  0  1 0   |
							// | 0  0  0 1   |
							token.Text = "matrix"
							*token.Children = append(append(args[0:4], args[8:12]...), args[24:27]...)
						}

						// Note: A "matrix3d" cannot be directly converted into a "translate3d"
						// because "translate3d" requires units while "matrix3d" requires no
						// units. I'm not sure exactly what the semantics are so I'm not
						// sure if you can just add "px" or not. Even if that did work,
						// you still couldn't substitute values containing "var()" since
						// units would still not be substituted in that case.
					}

				case "translate3d":
					// specifies a 3D translation by the vector [tx,ty,tz], with tx,
					// ty and tz being the first, second and third translation-value
					// parameters respectively.
					if n == 5 {
						tx, ty, tz := &args[0], &args[2], &args[4]
						tx.TurnLengthOrPercentageIntoNumberIfZero()
						ty.TurnLengthOrPercentageIntoNumberIfZero()
						tz.TurnLengthIntoNumberIfZero()
						if ty.IsZero() && tz.IsZero() {
							// "translate3d(tx, 0, 0)" => "translate(tx)"
							token.Text = "translate"
							*token.Children = args[:1]
						} else if tx.IsZero() && tz.IsZero() {
							// "translate3d(0, ty, 0)" => "translateY(ty)"
							token.Text = "translateY"
							*token.Children = args[2:3]
						} else if tx.IsZero() && ty.IsZero() {
							// "translate3d(0, 0, tz)" => "translateZ(tz)"
							token.Text = "translateZ"
							*token.Children = args[4:]
						} else if tz.IsZero() {
							// "translate3d(tx, ty, 0)" => "translate(tx, ty)"
							token.Text = "translate"
							*token.Children = args[:3]
						}
					}

				case "translatez":
					// specifies a 3D translation by the vector [0,0,tz] with the given
					// amount in the Z direction.
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "scale3d":
					// specifies a 3D scale operation by the [sx,sy,sz] scaling vector
					// described by the 3 parameters.
					if n == 5 {
						sx, sy, sz := &args[0], &args[2], &args[4]
						turnPercentIntoNumberIfShorter(sx)
						turnPercentIntoNumberIfShorter(sy)
						turnPercentIntoNumberIfShorter(sz)
						if sx.EqualIgnoringWhitespace(*sy) && sz.IsOne() {
							// "scale3d(s, s, 1)" => "scale(s)"
							token.Text = "scale"
							*token.Children = args[:1]
						} else if sy.IsOne() && sz.IsOne() {
							// "scale3d(sx, 1, 1)" => "scaleX(sx)"
							token.Text = "scaleX"
							*token.Children = args[:1]
						} else if sx.IsOne() && sz.IsOne() {
							// "scale3d(1, sy, 1)" => "scaleY(sy)"
							token.Text = "scaleY"
							*token.Children = args[2:3]
						} else if sx.IsOne() && sy.IsOne() {
							// "scale3d(1, 1, sz)" => "scaleZ(sz)"
							token.Text = "scaleZ"
							*token.Children = args[4:]
						} else if sz.IsOne() {
							// "scale3d(sx, sy, 1)" => "scale(sx, sy)"
							token.Text = "scale"
							*token.Children = args[:3]
						}
					}

				case "scalez":
					// specifies a 3D scale operation using the [1,1,sz] scaling vector,
					// where sz is given as the parameter.
					if n == 1 {
						turnPercentIntoNumberIfShorter(&args[0])
					}

				case "rotate3d":
					// specifies a 3D rotation by the angle specified in last parameter
					// about the [x,y,z] direction vector described by the first three
					// parameters. A direction vector that cannot be normalized, such as
					// [0,0,0], will cause the rotation to not be applied.
					if n == 7 {
						x, y, z, angle := &args[0], &args[2], &args[4], &args[6]
						angle.TurnLengthIntoNumberIfZero()
						if x.IsOne() && y.IsZero() && z.IsZero() {
							// "rotate3d(1, 0, 0, angle)" => "rotateX(angle)"
							token.Text = "rotateX"
							*token.Children = args[6:]
						} else if x.IsZero() && y.IsOne() && z.IsZero() {
							// "rotate3d(0, 1, 0, angle)" => "rotateY(angle)"
							token.Text = "rotateY"
							*token.Children = args[6:]
						} else if x.IsZero() && y.IsZero() && z.IsOne() {
							// "rotate3d(0, 0, 1, angle)" => "rotate(angle)"
							token.Text = "rotate"
							*token.Children = args[6:]
						}
					}

				case "rotatex":
					// same as rotate3d(1, 0, 0, <angle>).
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "rotatey":
					// same as rotate3d(0, 1, 0, <angle>).
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "rotatez":
					// same as rotate3d(0, 0, 1, <angle>), which is a 3d transform
					// equivalent to the 2d transform rotate(<angle>).
					if n == 1 {
						// "rotateZ(angle)" => "rotate(angle)"
						token.Text = "rotate"
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "perspective":
					// specifies a perspective projection matrix. This matrix scales
					// points in X and Y based on their Z value, scaling points with
					// positive Z values away from the origin, and those with negative Z
					// values towards the origin. Points on the z=0 plane are unchanged.
					// The parameter represents the distance of the z=0 plane from the
					// viewer.
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}
				}

				// Trim whitespace at the ends
				if args := *token.Children; len(args) > 0 {
					args[0].Whitespace &= ^css_ast.WhitespaceBefore
					args[len(args)-1].Whitespace &= ^css_ast.WhitespaceAfter
				}
			}
		}
	}
	return tokens
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,342 @@
package css_parser
import (
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// parseSelectorList parses a comma-separated list of complex selectors. On
// failure "ok" is false and the parser is left at the offending token.
func (p *parser) parseSelectorList() (list []css_ast.ComplexSelector, ok bool) {
	// Parse one selector per iteration; every selector after the first is
	// introduced by a comma
	for {
		p.eat(css_lexer.TWhitespace)
		sel, good := p.parseComplexSelector()
		if !good {
			return
		}
		list = append(list, sel)

		p.eat(css_lexer.TWhitespace)
		if !p.eat(css_lexer.TComma) {
			break
		}
	}

	ok = true
	return
}
// parseComplexSelector parses one complex selector: a sequence of compound
// selectors separated by optional combinators, terminated by a comma, an
// opening brace, or the end of the file. On failure "ok" is false and the
// parser is left at the offending token.
func (p *parser) parseComplexSelector() (result css_ast.ComplexSelector, ok bool) {
	// Parent
	sel, good := p.parseCompoundSelector()
	if !good {
		return
	}
	result.Selectors = append(result.Selectors, sel)

	for {
		p.eat(css_lexer.TWhitespace)
		if p.peek(css_lexer.TEndOfFile) || p.peek(css_lexer.TComma) || p.peek(css_lexer.TOpenBrace) {
			break
		}

		// Optional combinator (an empty string means the descendant combinator)
		combinator := p.parseCombinator()
		if combinator != "" {
			p.eat(css_lexer.TWhitespace)
		}

		// Child
		sel, good := p.parseCompoundSelector()
		if !good {
			return
		}
		sel.Combinator = combinator
		result.Selectors = append(result.Selectors, sel)
	}

	ok = true
	return
}
// nameToken captures the current token as a name (kind plus decoded text)
// without advancing the parser.
func (p *parser) nameToken() css_ast.NameToken {
	current := p.current()
	return css_ast.NameToken{
		Kind: current.Kind,
		Text: p.decoded(),
	}
}
// parseCompoundSelector parses one compound selector: an optional leading
// "&" (CSS nesting extension), an optional (possibly namespaced) type
// selector, and any number of subclass selectors (#id, .class, [attr...],
// and pseudo-classes/elements). At least one of these parts must be present.
func (p *parser) parseCompoundSelector() (sel css_ast.CompoundSelector, ok bool) {
	// This is an extension: https://drafts.csswg.org/css-nesting-1/
	if p.eat(css_lexer.TDelimAmpersand) {
		sel.HasNestPrefix = true
	}

	// Parse the type selector
	switch p.current().Kind {
	case css_lexer.TDelimBar, css_lexer.TIdent, css_lexer.TDelimAsterisk:
		nsName := css_ast.NamespacedName{}
		if !p.peek(css_lexer.TDelimBar) {
			nsName.Name = p.nameToken()
			p.advance()
		} else {
			// Hack: Create an empty "identifier" to represent this
			nsName.Name.Kind = css_lexer.TIdent
		}
		if p.eat(css_lexer.TDelimBar) {
			// "ns|name" or "*|name": what was parsed above was the namespace
			// prefix, and the real name follows the "|"
			if !p.peek(css_lexer.TIdent) && !p.peek(css_lexer.TDelimAsterisk) {
				p.expect(css_lexer.TIdent)
				return
			}
			prefix := nsName.Name
			nsName.NamespacePrefix = &prefix
			nsName.Name = p.nameToken()
			p.advance()
		}
		sel.TypeSelector = &nsName
	}

	// Parse the subclass selectors
subclassSelectors:
	for {
		switch p.current().Kind {
		case css_lexer.THash:
			// Only hash tokens that lexed as identifiers are valid ID selectors
			if !p.current().IsID {
				break subclassSelectors
			}
			name := p.decoded()
			sel.SubclassSelectors = append(sel.SubclassSelectors, &css_ast.SSHash{Name: name})
			p.advance()

		case css_lexer.TDelimDot:
			p.advance()
			name := p.decoded()
			sel.SubclassSelectors = append(sel.SubclassSelectors, &css_ast.SSClass{Name: name})
			p.expect(css_lexer.TIdent)

		case css_lexer.TOpenBracket:
			p.advance()
			attr, good := p.parseAttributeSelector()
			if !good {
				return
			}
			sel.SubclassSelectors = append(sel.SubclassSelectors, &attr)

		case css_lexer.TColon:
			if p.next().Kind == css_lexer.TColon {
				// Special-case the start of the pseudo-element selector section
				for p.current().Kind == css_lexer.TColon {
					isElement := p.next().Kind == css_lexer.TColon
					if isElement {
						p.advance()
					}
					pseudo := p.parsePseudoClassSelector()

					// https://www.w3.org/TR/selectors-4/#single-colon-pseudos
					// The four Level 2 pseudo-elements (::before, ::after, ::first-line,
					// and ::first-letter) may, for legacy reasons, be represented using
					// the <pseudo-class-selector> grammar, with only a single ":"
					// character at their start.
					if p.options.MangleSyntax && isElement && len(pseudo.Args) == 0 {
						switch pseudo.Name {
						case "before", "after", "first-line", "first-letter":
							isElement = false
						}
					}

					pseudo.IsElement = isElement
					sel.SubclassSelectors = append(sel.SubclassSelectors, &pseudo)
				}
				break subclassSelectors
			}
			pseudo := p.parsePseudoClassSelector()
			sel.SubclassSelectors = append(sel.SubclassSelectors, &pseudo)

		default:
			break subclassSelectors
		}
	}

	// The compound selector must be non-empty
	if !sel.HasNestPrefix && sel.TypeSelector == nil && len(sel.SubclassSelectors) == 0 {
		p.unexpected()
		return
	}
	ok = true
	return
}
// parseAttributeSelector parses the contents of an attribute selector after
// the opening "[" has already been consumed: an optional namespace prefix,
// the attribute name, an optional matcher operator plus value, and the
// optional "i"/"s" case-sensitivity modifier, ending at the closing "]".
func (p *parser) parseAttributeSelector() (attr css_ast.SSAttribute, ok bool) {
	// Parse the namespaced name
	switch p.current().Kind {
	case css_lexer.TDelimBar, css_lexer.TDelimAsterisk:
		// "[|x]"
		// "[*|x]"
		if p.peek(css_lexer.TDelimAsterisk) {
			prefix := p.nameToken()
			p.advance()
			attr.NamespacedName.NamespacePrefix = &prefix
		} else {
			// "[|attr]" is equivalent to "[attr]". From the specification:
			// "In keeping with the Namespaces in the XML recommendation, default
			// namespaces do not apply to attributes, therefore attribute selectors
			// without a namespace component apply only to attributes that have no
			// namespace (equivalent to |attr)."
		}
		if !p.expect(css_lexer.TDelimBar) {
			return
		}
		attr.NamespacedName.Name = p.nameToken()
		if !p.expect(css_lexer.TIdent) {
			return
		}

	default:
		// "[x]"
		// "[x|y]"
		attr.NamespacedName.Name = p.nameToken()
		if !p.expect(css_lexer.TIdent) {
			return
		}
		// Look ahead for "=" so that "[x|=y]" is parsed as the "|=" matcher
		// operator rather than as a namespace separator
		if p.next().Kind != css_lexer.TDelimEquals && p.eat(css_lexer.TDelimBar) {
			prefix := attr.NamespacedName.Name
			attr.NamespacedName.NamespacePrefix = &prefix
			attr.NamespacedName.Name = p.nameToken()
			if !p.expect(css_lexer.TIdent) {
				return
			}
		}
	}

	// Parse the optional matcher operator
	p.eat(css_lexer.TWhitespace)
	if p.eat(css_lexer.TDelimEquals) {
		attr.MatcherOp = "="
	} else {
		switch p.current().Kind {
		case css_lexer.TDelimTilde:
			attr.MatcherOp = "~="
		case css_lexer.TDelimBar:
			attr.MatcherOp = "|="
		case css_lexer.TDelimCaret:
			attr.MatcherOp = "^="
		case css_lexer.TDelimDollar:
			attr.MatcherOp = "$="
		case css_lexer.TDelimAsterisk:
			attr.MatcherOp = "*="
		}
		if attr.MatcherOp != "" {
			// Consume the delimiter and the "=" that must follow it
			p.advance()
			p.expect(css_lexer.TDelimEquals)
		}
	}

	// Parse the optional matcher value
	if attr.MatcherOp != "" {
		p.eat(css_lexer.TWhitespace)
		if !p.peek(css_lexer.TString) && !p.peek(css_lexer.TIdent) {
			p.unexpected()
		}
		attr.MatcherValue = p.decoded()
		p.advance()
		p.eat(css_lexer.TWhitespace)

		// Optional single-letter "i"/"s" case-sensitivity modifier
		if p.peek(css_lexer.TIdent) {
			if modifier := p.decoded(); len(modifier) == 1 {
				if c := modifier[0]; c == 'i' || c == 'I' || c == 's' || c == 'S' {
					attr.MatcherModifier = c
					p.advance()
				}
			}
		}
	}

	p.expect(css_lexer.TCloseBracket)
	ok = true
	return
}
// parsePseudoClassSelector parses the part of a pseudo-class selector after
// its ":" (which is still the current token and is consumed here). A
// function token (e.g. ":not(...)") yields a pseudo-class with arguments;
// otherwise a plain identifier (e.g. ":hover") is expected.
func (p *parser) parsePseudoClassSelector() css_ast.SSPseudoClass {
	p.advance()

	// Functional form: parse the raw argument tokens up to the ")"
	if p.peek(css_lexer.TFunction) {
		name := p.decoded()
		p.advance()
		args := p.convertTokens(p.parseAnyValue())
		p.expect(css_lexer.TCloseParen)
		return css_ast.SSPseudoClass{Name: name, Args: args}
	}

	// Plain identifier form; the name is only kept if the token is valid
	var sel css_ast.SSPseudoClass
	name := p.decoded()
	if p.expect(css_lexer.TIdent) {
		sel.Name = name
	}
	return sel
}
// parseAnyValue consumes a balanced run of raw tokens making up an
// arbitrary value (e.g. the arguments of a pseudo-class function) and
// returns them as a slice of the parser's token stream. It stops at an
// unmatched ")", "]", or "}", or at a top-level ";" or "!". The parser's
// shared bracket stack is reused to track nesting.
func (p *parser) parseAnyValue() []css_lexer.Token {
	// Reference: https://drafts.csswg.org/css-syntax-3/#typedef-declaration-value

	p.stack = p.stack[:0] // Reuse allocated memory
	start := p.index

loop:
	for {
		switch p.current().Kind {
		case css_lexer.TCloseParen, css_lexer.TCloseBracket, css_lexer.TCloseBrace:
			// Stop at a close bracket that doesn't match one of our opens
			last := len(p.stack) - 1
			if last < 0 || !p.peek(p.stack[last]) {
				break loop
			}
			p.stack = p.stack[:last]

		case css_lexer.TSemicolon, css_lexer.TDelimExclamation:
			// ";" and "!" only terminate the value at the top nesting level
			if len(p.stack) == 0 {
				break loop
			}

		case css_lexer.TOpenParen, css_lexer.TFunction:
			p.stack = append(p.stack, css_lexer.TCloseParen)

		case css_lexer.TOpenBracket:
			p.stack = append(p.stack, css_lexer.TCloseBracket)

		case css_lexer.TOpenBrace:
			p.stack = append(p.stack, css_lexer.TCloseBrace)
		}
		p.advance()
	}

	tokens := p.tokens[start:p.index]
	if len(tokens) == 0 {
		p.unexpected()
	}
	return tokens
}
// parseCombinator consumes a selector combinator token if one is present
// and returns its textual form (">", "+", or "~"). An empty string means no
// explicit combinator was found (the descendant combinator); in that case
// the parser is not advanced.
func (p *parser) parseCombinator() string {
	var combinator string

	switch p.current().Kind {
	case css_lexer.TDelimGreaterThan:
		combinator = ">"
	case css_lexer.TDelimPlus:
		combinator = "+"
	case css_lexer.TDelimTilde:
		combinator = "~"
	default:
		return ""
	}

	p.advance()
	return combinator
}

View File

@ -0,0 +1,575 @@
package css_parser
import (
"fmt"
"math"
"strconv"
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// tryToReduceCalcExpression attempts to simplify the contents of a "calc()"
// function token: the children are parsed into a calc term tree, partially
// simplified, and serialized back into tokens. If any step fails, the
// original token is returned unchanged.
func (p *parser) tryToReduceCalcExpression(token css_ast.Token) css_ast.Token {
	if term := tryToParseCalcTerm(*token.Children); term != nil {
		whitespace := css_ast.WhitespaceBefore | css_ast.WhitespaceAfter
		if p.options.RemoveWhitespace {
			whitespace = 0
		}
		term = term.partiallySimplify()
		if result, ok := term.convertToToken(whitespace); ok {
			// A parenthesized result at the top level becomes the "calc(...)"
			// call itself
			if result.Kind == css_lexer.TOpenParen {
				result.Kind = css_lexer.TFunction
				result.Text = "calc"
			}
			return result
		}
	}
	return token
}
// See: https://www.w3.org/TR/css-values-4/#calc-internal
//
// calcTerm is a node in the internal tree used to simplify "calc()"
// expressions. A term can be partially simplified and then serialized back
// into a CSS token.
type calcTerm interface {
	convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool)
	partiallySimplify() calcTerm
}

// calcSum is the sum of its terms; subtraction is represented by wrapping a
// term in calcNegate (see the " - " serialization in convertToToken).
type calcSum struct {
	terms []calcTerm
}

// calcProduct is the product of its terms, per the spec's internal
// representation.
type calcProduct struct {
	terms []calcTerm
}

// calcNegate negates its single child term (the spec's "Negate" node).
type calcNegate struct {
	term calcTerm
}

// calcInvert wraps its single child term (the spec's "Invert" node,
// representing division).
type calcInvert struct {
	term calcTerm
}

// calcNumeric is a numeric literal with an optional unit (an empty unit
// means a plain number).
type calcNumeric struct {
	number float64
	unit   string
}

// calcValue wraps a raw token that cannot be simplified further.
type calcValue struct {
	token css_ast.Token
	// NOTE(review): this flag is consumed elsewhere — presumably it marks a
	// "+"/"-" that appeared without the whitespace calc() requires; confirm
	// against the parser that sets it.
	isInvalidPlusOrMinus bool
}
// floatToStringForCalc prints a float for use inside a reduced "calc()"
// expression. It returns false when the number has no exact short decimal
// form (NaN, infinities, or values that lose precision when rounded to five
// decimal places), in which case the caller must keep the original tokens.
func floatToStringForCalc(a float64) (string, bool) {
	// Handle non-finite cases
	if math.IsNaN(a) || math.IsInf(a, 0) {
		return "", false
	}

	// Print with five decimal places, then drop trailing zeros and a trailing
	// decimal point (e.g. "1.50000" => "1.5", "2.00000" => "2")
	text := fmt.Sprintf("%.05f", a)
	text = strings.TrimRight(text, "0")
	text = strings.TrimSuffix(text, ".")

	// Drop the zero before the decimal point ("0.5" => ".5", "-0.5" => "-.5")
	if strings.HasPrefix(text, "0.") {
		text = text[1:]
	} else if strings.HasPrefix(text, "-0.") {
		text = "-" + text[2:]
	}

	// Bail if the number is not exactly represented by the shortened text
	if number, err := strconv.ParseFloat(text, 64); err != nil || number != a {
		return "", false
	}
	return text, true
}
// convertToToken serializes a Sum node as a parenthesized token group such
// as "(a + b - c)". The result uses kind TOpenParen; the top-level caller
// rewrites that into a "calc(" function token. Returns false if any child
// cannot be serialized exactly.
func (c *calcSum) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
	tokens := make([]css_ast.Token, 0, len(c.terms)*2)
	// ALGORITHM DEVIATION: Avoid parenthesizing product nodes inside sum nodes
	if product, ok := c.terms[0].(*calcProduct); ok {
		token, ok := product.convertToToken(whitespace)
		if !ok {
			return css_ast.Token{}, false
		}
		// Splice the product's children in directly instead of nesting parens
		tokens = append(tokens, *token.Children...)
	} else {
		token, ok := c.terms[0].convertToToken(whitespace)
		if !ok {
			return css_ast.Token{}, false
		}
		tokens = append(tokens, token)
	}
	for _, term := range c.terms[1:] {
		// If child is a Negate node, append " - " to s, then serialize the Negates child and append the result to s.
		if negate, ok := term.(*calcNegate); ok {
			token, ok := negate.term.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, css_ast.Token{
				Kind: css_lexer.TDelimMinus,
				Text: "-",
				Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
			}, token)
			continue
		}
		// If child is a negative numeric value, append " - " to s, then serialize the negation of child as normal and append the result to s.
		if numeric, ok := term.(*calcNumeric); ok && numeric.number < 0 {
			// Copy the node before flipping the sign so the tree is not mutated
			clone := *numeric
			clone.number = -clone.number
			token, ok := clone.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, css_ast.Token{
				Kind: css_lexer.TDelimMinus,
				Text: "-",
				Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
			}, token)
			continue
		}
		// Otherwise, append " + " to s, then serialize child and append the result to s.
		tokens = append(tokens, css_ast.Token{
			Kind: css_lexer.TDelimPlus,
			Text: "+",
			Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
		})
		// ALGORITHM DEVIATION: Avoid parenthesizing product nodes inside sum nodes
		if product, ok := term.(*calcProduct); ok {
			token, ok := product.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, *token.Children...)
		} else {
			token, ok := term.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, token)
		}
	}
	return css_ast.Token{
		Kind: css_lexer.TOpenParen,
		Text: "(",
		Children: &tokens,
	}, true
}
// convertToToken serializes a Product node as a parenthesized token group
// such as "(a * b / c)". Returns false if any child cannot be serialized
// exactly.
//
// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
func (c *calcProduct) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	tokens := make([]css_ast.Token, 0, len(c.terms)*2)

	// Serialize the first factor as-is
	head, ok := c.terms[0].convertToToken(whitespace)
	if !ok {
		return css_ast.Token{}, false
	}
	tokens = append(tokens, head)

	// Join every remaining factor with either " * " or " / "
	for _, factor := range c.terms[1:] {
		operator := css_ast.Token{
			Kind: css_lexer.TDelimAsterisk,
			Text: "*",
			Whitespace: whitespace,
		}

		// An Invert node serializes as division by its child
		if invert, isInvert := factor.(*calcInvert); isInvert {
			operator.Kind = css_lexer.TDelimSlash
			operator.Text = "/"
			factor = invert.term
		}

		operand, ok := factor.convertToToken(whitespace)
		if !ok {
			return css_ast.Token{}, false
		}
		tokens = append(tokens, operator, operand)
	}

	return css_ast.Token{
		Kind: css_lexer.TOpenParen,
		Text: "(",
		Children: &tokens,
	}, true
}
// convertToToken serializes a Negate node as "(-1 * term)". Returns false
// if the child cannot be serialized exactly.
//
// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
func (c *calcNegate) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	token, ok := c.term.convertToToken(whitespace)
	if !ok {
		return css_ast.Token{}, false
	}
	return css_ast.Token{
		Kind: css_lexer.TOpenParen,
		Text: "(",
		Children: &[]css_ast.Token{
			{Kind: css_lexer.TNumber, Text: "-1"},
			// Fix: this token's text is "*" so its kind must be TDelimAsterisk,
			// not TDelimSlash (matching how calcProduct serializes "*")
			{Kind: css_lexer.TDelimAsterisk, Text: "*", Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter},
			token,
		},
	}, true
}
// convertToToken serializes an Invert node as the parenthesized division
// "(1 / term)". Returns false if the child cannot be serialized exactly.
//
// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
func (c *calcInvert) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	child, ok := c.term.convertToToken(whitespace)
	if !ok {
		return css_ast.Token{}, false
	}
	children := []css_ast.Token{
		{Kind: css_lexer.TNumber, Text: "1"},
		{Kind: css_lexer.TDelimSlash, Text: "/", Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter},
		child,
	}
	return css_ast.Token{
		Kind: css_lexer.TOpenParen,
		Text: "(",
		Children: &children,
	}, true
}
// convertToToken serializes a numeric leaf as a number, percentage, or
// dimension token depending on its unit. Returns false if the value has no
// exact short decimal form.
func (c *calcNumeric) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	text, ok := floatToStringForCalc(c.number)
	if !ok {
		return css_ast.Token{}, false
	}
	switch c.unit {
	case "":
		// A plain number
		return css_ast.Token{
			Kind: css_lexer.TNumber,
			Text: text,
		}, true
	case "%":
		// A percentage
		return css_ast.Token{
			Kind: css_lexer.TPercentage,
			Text: text + "%",
		}, true
	default:
		// A dimension such as "10px"; the unit starts right after the digits
		return css_ast.Token{
			Kind: css_lexer.TDimension,
			Text: text + c.unit,
			UnitOffset: uint16(len(text)),
		}, true
	}
}
// convertToToken passes the wrapped token through unchanged except that its
// whitespace flags are cleared, so the serializer controls spacing instead
// of the original source text. This never fails.
func (c *calcValue) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	clone := c.token
	clone.Whitespace = 0
	return clone, true
}
// partiallySimplify flattens nested Sum nodes and merges numeric children
// that share a unit. If everything folds into one term, that term replaces
// this node.
func (c *calcSum) partiallySimplify() calcTerm {
	// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
	// For each of roots children that are Sum nodes, replace them with their children.
	terms := make([]calcTerm, 0, len(c.terms))
	for _, term := range c.terms {
		term = term.partiallySimplify()
		if sum, ok := term.(*calcSum); ok {
			terms = append(terms, sum.terms...)
		} else {
			terms = append(terms, term)
		}
	}
	// For each set of roots children that are numeric values with identical units, remove
	// those children and replace them with a single numeric value containing the sum of the
	// removed nodes, and with the same unit. (E.g. combine numbers, combine percentages,
	// combine px values, etc.)
	for i := 0; i < len(terms); i++ {
		term := terms[i]
		if numeric, ok := term.(*calcNumeric); ok {
			// Compact in place: later terms with the same unit are added into
			// "numeric" and dropped by shifting the survivors left over them
			end := i + 1
			for j := end; j < len(terms); j++ {
				term2 := terms[j]
				if numeric2, ok := term2.(*calcNumeric); ok && numeric2.unit == numeric.unit {
					numeric.number += numeric2.number
				} else {
					terms[end] = term2
					end++
				}
			}
			terms = terms[:end]
		}
	}
	// If root has only a single child at this point, return the child.
	if len(terms) == 1 {
		return terms[0]
	}
	// Otherwise, return root.
	c.terms = terms
	return c
}
// partiallySimplify flattens nested Product nodes, multiplies unitless
// numeric children together, folds a unitless factor into a single remaining
// numeric factor, and rewrites a factor as division when its reciprocal
// prints shorter. If everything folds into one term, that term replaces
// this node.
func (c *calcProduct) partiallySimplify() calcTerm {
	// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
	// For each of roots children that are Product nodes, replace them with their children.
	terms := make([]calcTerm, 0, len(c.terms))
	for _, term := range c.terms {
		term = term.partiallySimplify()
		if product, ok := term.(*calcProduct); ok {
			terms = append(terms, product.terms...)
		} else {
			terms = append(terms, term)
		}
	}
	// If root has multiple children that are numbers (not percentages or dimensions), remove
	// them and replace them with a single number containing the product of the removed nodes.
	for i, term := range terms {
		if numeric, ok := term.(*calcNumeric); ok && numeric.unit == "" {
			// Compact in place: later unitless numbers are multiplied into
			// "numeric" and dropped by shifting the survivors left over them
			end := i + 1
			for j := end; j < len(terms); j++ {
				term2 := terms[j]
				if numeric2, ok := term2.(*calcNumeric); ok && numeric2.unit == "" {
					numeric.number *= numeric2.number
				} else {
					terms[end] = term2
					end++
				}
			}
			terms = terms[:end]
			// Only the first unitless number starts a merge; everything after
			// it has already been folded in, so stop scanning
			break
		}
	}
	// If root contains only numeric values and/or Invert nodes containing numeric values,
	// and multiplying the types of all the children (noting that the type of an Invert
	// node is the inverse of its childs type) results in a type that matches any of the
	// types that a math function can resolve to, return the result of multiplying all the
	// values of the children (noting that the value of an Invert node is the reciprocal
	// of its childs value), expressed in the results canonical unit.
	if len(terms) == 2 {
		// Right now, only handle the case of two numbers, one of which has no unit
		if first, ok := terms[0].(*calcNumeric); ok {
			if second, ok := terms[1].(*calcNumeric); ok {
				if first.unit == "" {
					second.number *= first.number
					return second
				}
				if second.unit == "" {
					first.number *= second.number
					return first
				}
			}
		}
	}
	// ALGORITHM DEVIATION: Divide instead of multiply if the reciprocal is shorter
	for i := 1; i < len(terms); i++ {
		if numeric, ok := terms[i].(*calcNumeric); ok {
			reciprocal := 1 / numeric.number
			// Only rewrite when both forms print exactly and "/x" wins on length
			if multiply, ok := floatToStringForCalc(numeric.number); ok {
				if divide, ok := floatToStringForCalc(reciprocal); ok && len(divide) < len(multiply) {
					numeric.number = reciprocal
					terms[i] = &calcInvert{term: numeric}
				}
			}
		}
	}
	// If root has only a single child at this point, return the child.
	if len(terms) == 1 {
		return terms[0]
	}
	// Otherwise, return root.
	c.terms = terms
	return c
}
// partiallySimplify folds a negation into a numeric child and cancels a
// double negation; otherwise the Negate node is kept.
func (c *calcNegate) partiallySimplify() calcTerm {
	// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
	c.term = c.term.partiallySimplify()

	switch child := c.term.(type) {
	case *calcNumeric:
		// Fold the negation into the numeric value (0 - value)
		child.number = -child.number
		return child
	case *calcNegate:
		// A Negate of a Negate cancels out
		return child.term
	}
	return c
}
// partiallySimplify folds an inversion of a unitless number into its
// reciprocal and cancels a double inversion; otherwise the Invert node is
// kept (the reciprocal of a dimension has a different type).
func (c *calcInvert) partiallySimplify() calcTerm {
	// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
	c.term = c.term.partiallySimplify()

	switch child := c.term.(type) {
	case *calcNumeric:
		// Only plain numbers (not percentages or dimensions) can be folded
		if child.unit == "" {
			child.number = 1 / child.number
			return child
		}
	case *calcInvert:
		// An Invert of an Invert cancels out
		return child.term
	}
	return c
}
// partiallySimplify returns a numeric leaf unchanged: a lone number is already fully simplified.
func (c *calcNumeric) partiallySimplify() calcTerm {
	return c
}
// partiallySimplify returns an opaque value leaf unchanged: unrecognized tokens cannot be folded.
func (c *calcValue) partiallySimplify() calcTerm {
	return c
}
// tryToParseCalcTerm parses the token contents of "calc()" (or of a nested
// parenthesized group) into a calcTerm tree. It returns nil when the tokens
// cannot be fully understood (e.g. they contain "var()", or do not reduce to
// a single expression), in which case the caller must leave the original
// expression alone.
func tryToParseCalcTerm(tokens []css_ast.Token) calcTerm {
	// Specification: https://www.w3.org/TR/css-values-4/#calc-internal
	terms := make([]calcTerm, len(tokens))
	// First pass: turn each token into a leaf (or a recursively-parsed group)
	for i, token := range tokens {
		var term calcTerm
		if token.Kind == css_lexer.TFunction && token.Text == "var" {
			// Using "var()" should bail because it can expand to any number of tokens
			return nil
		} else if token.Kind == css_lexer.TOpenParen || (token.Kind == css_lexer.TFunction && token.Text == "calc") {
			// Parentheses and nested "calc()" both just group a sub-expression
			term = tryToParseCalcTerm(*token.Children)
			if term == nil {
				return nil
			}
		} else if token.Kind == css_lexer.TNumber {
			if number, err := strconv.ParseFloat(token.Text, 64); err == nil {
				term = &calcNumeric{number: number}
			} else {
				term = &calcValue{token: token}
			}
		} else if token.Kind == css_lexer.TPercentage {
			if number, err := strconv.ParseFloat(token.PercentageValue(), 64); err == nil {
				term = &calcNumeric{number: number, unit: "%"}
			} else {
				term = &calcValue{token: token}
			}
		} else if token.Kind == css_lexer.TDimension {
			if number, err := strconv.ParseFloat(token.DimensionValue(), 64); err == nil {
				term = &calcNumeric{number: number, unit: token.DimensionUnit()}
			} else {
				term = &calcValue{token: token}
			}
		} else if token.Kind == css_lexer.TIdent && strings.EqualFold(token.Text, "Infinity") {
			term = &calcNumeric{number: math.Inf(1)}
		} else if token.Kind == css_lexer.TIdent && strings.EqualFold(token.Text, "-Infinity") {
			term = &calcNumeric{number: math.Inf(-1)}
		} else if token.Kind == css_lexer.TIdent && strings.EqualFold(token.Text, "NaN") {
			term = &calcNumeric{number: math.NaN()}
		} else {
			term = &calcValue{
				token: token,
				// From the specification: "In addition, whitespace is required on both sides of the
				// + and - operators. (The * and / operators can be used without white space around them.)"
				isInvalidPlusOrMinus: i > 0 && i+1 < len(tokens) &&
					(token.Kind == css_lexer.TDelimPlus || token.Kind == css_lexer.TDelimMinus) &&
					(((token.Whitespace&css_ast.WhitespaceBefore) == 0 && (tokens[i-1].Whitespace&css_ast.WhitespaceAfter) == 0) ||
						(token.Whitespace&css_ast.WhitespaceAfter) == 0 && (tokens[i+1].Whitespace&css_ast.WhitespaceBefore) == 0),
			}
		}
		terms[i] = term
	}
	// After the first pass, "terms" alternates operand, operator, operand, ...
	// so operators can only sit at the positions scanned below.
	// Collect children into Product and Invert nodes
	first := 1
	for first+1 < len(terms) {
		// If this is a "*" or "/" operator
		if value, ok := terms[first].(*calcValue); ok && (value.token.Kind == css_lexer.TDelimAsterisk || value.token.Kind == css_lexer.TDelimSlash) {
			// Scan over the run (operators repeat every two slots: a op b op c ...)
			last := first
			for last+3 < len(terms) {
				if value, ok := terms[last+2].(*calcValue); ok && (value.token.Kind == css_lexer.TDelimAsterisk || value.token.Kind == css_lexer.TDelimSlash) {
					last += 2
				} else {
					break
				}
			}
			// Generate a node for the run; operand i comes from terms[first-1+2*i]
			product := calcProduct{terms: make([]calcTerm, (last-first)/2+2)}
			for i := range product.terms {
				term := terms[first+i*2-1]
				// A preceding "/" operator wraps this operand in an Invert node
				if i > 0 && terms[first+i*2-2].(*calcValue).token.Kind == css_lexer.TDelimSlash {
					term = &calcInvert{term: term}
				}
				product.terms[i] = term
			}
			// Replace the run with a single node
			terms[first-1] = &product
			terms = append(terms[:first], terms[last+2:]...)
			continue
		}
		first++
	}
	// Collect children into Sum and Negate nodes
	first = 1
	for first+1 < len(terms) {
		// If this is a "+" or "-" operator (with the required whitespace around it)
		if value, ok := terms[first].(*calcValue); ok && !value.isInvalidPlusOrMinus &&
			(value.token.Kind == css_lexer.TDelimPlus || value.token.Kind == css_lexer.TDelimMinus) {
			// Scan over the run
			last := first
			for last+3 < len(terms) {
				if value, ok := terms[last+2].(*calcValue); ok && !value.isInvalidPlusOrMinus &&
					(value.token.Kind == css_lexer.TDelimPlus || value.token.Kind == css_lexer.TDelimMinus) {
					last += 2
				} else {
					break
				}
			}
			// Generate a node for the run; operand i comes from terms[first-1+2*i]
			sum := calcSum{terms: make([]calcTerm, (last-first)/2+2)}
			for i := range sum.terms {
				term := terms[first+i*2-1]
				// A preceding "-" operator wraps this operand in a Negate node
				if i > 0 && terms[first+i*2-2].(*calcValue).token.Kind == css_lexer.TDelimMinus {
					term = &calcNegate{term: term}
				}
				sum.terms[i] = term
			}
			// Replace the run with a single node
			terms[first-1] = &sum
			terms = append(terms[:first], terms[last+2:]...)
			continue
		}
		first++
	}
	// This only succeeds if everything reduces to a single term
	if len(terms) == 1 {
		return terms[0]
	}
	return nil
}